Columns:
  query      string, length 9 to 9.05k
  document   string, length 10 to 222k
  negatives  list, length 19 to 20
  metadata   dict
Prepares the files and organizes the directories. Returns a dictionary of directory keys and their respective locations.
def prepare_directories() -> dict:
    original_dataset_dir = os.path.join(data_dir, 'original')
    original_train_dir = os.path.join(original_dataset_dir, 'train')
    original_test_dir = os.path.join(original_dataset_dir, 'test1')
    base_dir = os.path.join(data_dir, 'cats_and_dogs_small')
    train_dir = os.path.join(base_dir, 'train')
    validation_dir = os.path.join(base_dir, 'validation')
    test_dir = os.path.join(base_dir, 'test')
    train_cats_dir = os.path.join(train_dir, 'cats')
    train_dogs_dir = os.path.join(train_dir, 'dogs')
    validation_cats_dir = os.path.join(validation_dir, 'cats')
    validation_dogs_dir = os.path.join(validation_dir, 'dogs')
    test_cats_dir = os.path.join(test_dir, 'cats')
    test_dogs_dir = os.path.join(test_dir, 'dogs')

    # creates the directories
    if os.path.isdir(base_dir):
        shutil.rmtree(base_dir)
    os.mkdir(base_dir)
    os.mkdir(train_dir)
    os.mkdir(validation_dir)
    os.mkdir(test_dir)
    os.mkdir(train_cats_dir)
    os.mkdir(train_dogs_dir)
    os.mkdir(validation_cats_dir)
    os.mkdir(validation_dogs_dir)
    os.mkdir(test_dogs_dir)
    os.mkdir(test_cats_dir)

    dirs = {'original_dataset': original_dataset_dir,
            'original_train': original_train_dir,
            'original_test': original_test_dir,
            'train': train_dir,
            'validation': validation_dir,
            'test': test_dir,
            'train_cats': train_cats_dir,
            'validation_cats': validation_cats_dir,
            'test_cats': test_cats_dir,
            'train_dogs': train_dogs_dir,
            'validation_dogs': validation_dogs_dir,
            'test_dogs': test_dogs_dir}
    return dirs
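A minimal usage sketch for the function above, assuming it lives in a module where os and shutil are imported and where data_dir is a module-level path; the '/tmp/dogs_vs_cats' value is purely illustrative.

import os
import shutil

data_dir = '/tmp/dogs_vs_cats'        # hypothetical dataset root (assumption)
os.makedirs(data_dir, exist_ok=True)  # base_dir is created directly under data_dir

dirs = prepare_directories()
# Every value is a path string; e.g. dirs['train_cats'] ends in .../train/cats
print(sorted(dirs))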
[ "def file_folder_specs():\n\n root = 'D:\\KST\\proj\\template\\template'\n files_folders = {\n 'root' : root,\n #'data' : os.path.abspath(\n #os.path.join(root, 'data')\n #)\n }\n\n # we can also check for existence here to put everything in place\n #if not os.path.exists(files_folders[data]): os.makedirs(files_folders[data])\n\n return files_folders", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % self.location\n ensure_path(self.conf_path)\n for subdir in config.DATA_DIRS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def _setup_dirstructure() -> None:\n dirs = [\n DIR_STORAGE,\n os.path.join(DIR_STORAGE, \"data\", \"datasets\"),\n os.path.join(DIR_STORAGE, \"data\", \"geometries\"),\n os.path.join(DIR_STORAGE, \"data\", \"raw\"),\n os.path.join(DIR_STORAGE, \"data\", \"tables\"),\n os.path.join(DIR_STORAGE, \"logs\"),\n os.path.join(DIR_STORAGE, \"logs\"),\n os.path.join(DIR_STORAGE, \"models\"),\n os.path.join(DIR_STORAGE, \"pipeline\", \"predictions\"),\n os.path.join(DIR_STORAGE, \"scratch\"),\n ]\n for path_dir in dirs:\n utils.io.create_directory(path_dir)", "def build_folders_dict():\n all_folders = [x for x in os.listdir(INPUT_DIR) if os.path.isdir(INPUT_DIR + x)]\n folder_dict = defaultdict(list)\n for folder in all_folders:\n folder_initial = folder[0].upper()\n assert folder_initial in VAL_INIT, \"folder initial not in VAL_INIT {}\".format(folder_initial)\n folder_dict[folder_initial].append(folder)\n\n return folder_dict", "def walk(self, base_dir):\n assert isinstance(base_dir, str), 'base_dir must be a string type'\n if not os.path.exists(base_dir):\n print \"base directory tree does not exist:\\n\\t%s\" % (base_dir,)\n return dict(self.data)\n # walk the base_dir, we don't care about directory names.\n for root, _, files in os.walk(base_dir):\n for fn in files:\n # build the full pile path\n fp = os.path.join(root, fn)\n # process the file data to get the hash key\n key = process_file(fp, self.read_size)\n # check to see if we have seen this hash before\n if key in self.data:\n \"\"\" if we have seen this key before this file is a dup.\n add the key to the dup set\n \"\"\"\n self.dup_keys.add(key)\n # add the key and the data dict\n self.data[key].append(fp)\n return dict(self.data)", "def _generate_dict_files(self):\n\n files = {}\n\n folder = os.path.join(self.res, 'dictionary')\n\n for preprocess in self.preproc:\n file = \"dictionary_{}.bin\".format(preprocess)\n files[preprocess] = os.path.join(folder, file)\n\n return files", "def _group_files_by_dir(\n source_list: List[str]) -> Tuple[Dict[str, List[str]], List[str]]:\n grouped_files: Dict[str, List[str]] = {}\n dirs = []\n for source in source_list:\n source = os.path.abspath(os.path.expanduser(source))\n if os.path.isdir(source):\n dirs.append(source)\n else:\n base_path = os.path.dirname(source)\n file_name = os.path.basename(source)\n if base_path not in grouped_files:\n grouped_files[base_path] = []\n grouped_files[base_path].append(file_name)\n return grouped_files, dirs", "def _generate_data_files(self):\n\n files = {}\n\n # inits\n for category in self.classes:\n files[category] = {}\n files[category]['divided'] = {}\n files[category]['preprocessed'] = {}\n files[category]['transformed'] = {}\n files[category]['classified'] = {}\n\n # extracted data\n folder = os.path.join(self.res, 'data', category, 'extracted')\n file = \"{}.json\".format(category)\n\n files[category]['extracted'] = os.path.join(folder, file)\n\n # divided data\n 
folder = os.path.join(self.res, 'data', category, 'divided')\n\n for subset in self.subsets:\n file = \"{}_{}.json\".format(category, subset)\n files[category]['divided'][subset] = os.path.join(folder, file)\n\n # preprocessed data\n for preprocess in self.preproc:\n folder = os.path.join(\n self.res, 'data', category, 'preprocessed', preprocess)\n\n files[category]['preprocessed'][preprocess] = {}\n for subset in self.subsets:\n file = \"{}_{}.json\".format(category, subset)\n files[category]['preprocessed'][preprocess][subset] = \\\n os.path.join(folder, file)\n\n # transformed data\n for transformation in self.trans:\n for preprocess in self.preproc:\n ctrans = \"{}_{}\".format(transformation, preprocess)\n\n folder = os.path.join(\n self.res, 'data', category, 'transformed', ctrans)\n\n files[category]['transformed'][ctrans] = {}\n for subset in self.subsets:\n file = \"{}_{}.json\".format(category, subset)\n files[category]['transformed'][ctrans][subset] = \\\n os.path.join(folder, file)\n\n # classified data\n for transformation in self.trans:\n for preprocess in self.preproc:\n ctrans = \"{}_{}\".format(transformation, preprocess)\n\n folder = os.path.join(\n self.res, 'data', category, 'p_classified', ctrans)\n files[category]['classified'][ctrans] = folder\n\n return files", "def create_data_directories(self):\r\n\r\n try:\r\n self.dir_variant_raw.mkdir(exist_ok=True, parents=True)\r\n self.dir_variant_effects.mkdir(exist_ok=True, parents=True)\r\n self.dir_variant_meta.mkdir(exist_ok=True, parents=True)\r\n\r\n self.dir_gene_raw.mkdir(exist_ok=True, parents=True)\r\n self.dir_gene_meta.mkdir(exist_ok=True, parents=True)\r\n\r\n self.dir_annotated_inter.mkdir(exist_ok=True, parents=True)\r\n self.dir_annotated_intra.mkdir(exist_ok=True, parents=True)\r\n\r\n except OSError as e:\r\n logging.getLogger(__name__).error('Could not make data directories: %s', e)\r\n exit(1)", "def _make_dirs(self) -> None:\n self._make_log_dir()\n self._make_ckpt_dir()\n if self.config.habitat_baselines.il.eval_save_results:\n self._make_results_dir()", "def directories():\n\treturn {\n\t\t\"CURRENT_DIR\": os.getcwd(),\n\t\t\"HOME_DIR\": os.path.expanduser(\"~\"),\n\t\t\"BASE_DIR\": os.path.dirname(__file__),\n\t}", "def create_dict_of_cases(list_of_file_numbers_and_parent_dirs):\n pass", "def _scan(self, scan_dir):\n output = {}\n root = os.path.join(self.base_dir, scan_dir)\n for filename in os.listdir(root):\n # read the metadata\n if filename.startswith('.'):\n continue\n path = os.path.join(root,filename)\n file_info = {}\n file_info['name'] = filename\n file_info['path'] = path\n file_info['metadata'] = {}\n file_info['content'] = \"\"\n metadata = []\n with open(path,'r') as f:\n line = f.readline()\n while line:\n if line.startswith(\":\"):\n metadata.append(line.strip())\n else:\n file_info['content'] += line\n line = f.readline()\n # pick out the metadata directives\n for md in metadata:\n mds = md.split(' ')\n mds[0] = mds[0].strip(':')\n if mds[0] == 'generate':\n self.generate.append((mds[1],mds[2],filename))\n if mds[0] == 'require' or mds[0] == 'include':\n self.components.append(mds[1])\n if mds[0] not in file_info['metadata']:\n file_info['metadata'][mds[0]] = []\n file_info['metadata'][mds[0]].append(mds[1:])\n output[filename] = file_info\n return output", "def prepare_folders():\n if not os.path.exists(\"data/usage\"):\n os.makedirs(\"data/usage\")", "async def create_file_dict(directory):\n file_paths = await get_list_of_files(directory)\n file_dict = {}\n for file_path in 
file_paths:\n value = file_path.replace('.py', '')\n key = value[value.rindex('.') + 1:]\n file_dict[key] = value\n return file_dict", "def create_files_dir(self):\n raw_data_dir = self.data_path / \"raw\"\n raw_data_dir.mkdir(exist_ok=True)\n\n processed_data_dir = self.data_path / \"processed\"\n processed_data_dir.mkdir(exist_ok=True)\n\n self.files_dir = self.data_path / \"raw\" / self.author_id\n\n print(\"Author's directory:\", self.files_dir.absolute())\n\n self.files_dir.mkdir(exist_ok=True)", "def _resources_mapper(self):\r\n dir_ = {}\r\n rfolder = self.resources_folder_dir\r\n rootdir = rfolder.rstrip(OS_SEP)\r\n start = rfolder.rfind(OS_SEP) + 1\r\n for path, dirs, files in os.walk(rootdir):\r\n folders = path[start:].split(OS_SEP)\r\n subdir = {\r\n f.split('.csv')[0].replace(self.country, '') : os.path.join(\r\n path, f\r\n )\r\n for f in filter(\r\n lambda f_: self.country in f_ and f_.endswith('.csv'),\r\n dict.fromkeys(files).keys()\r\n )\r\n }\r\n parent = ft.reduce(dict.get, folders[:-1], dir_)\r\n parent[folders[-1]] = subdir\r\n return dir_['resources']", "def parse_directory(file_path,pattern,var_order='rtczyx'):\n\n # validate the variable order\n val_variables(var_order)\n\n # get regular expression from file pattern\n regex, variables = get_regex(pattern)\n\n # initialize the output\n if len(variables) == 0:\n file_ind = []\n else:\n file_ind = {}\n files = [f.name for f in Path(file_path).iterdir() if f.is_file()]\n files.sort()\n\n # Unique values for each variable\n uvals = {key:[] for key in var_order}\n\n # Build the output dictionary\n for f in files:\n \n # Parse filename values\n variables = parse_filename(f,pattern)\n\n # If the filename doesn't match the pattern, don't include it\n if variables == None:\n continue\n \n # Generate the layered dictionary using the specified ordering\n temp_dict = file_ind\n if isinstance(file_ind,dict):\n for key in var_order:\n if variables[key] not in temp_dict.keys():\n if variables[key] not in uvals[key]:\n uvals[key].append(variables[key])\n if var_order[-1] != key:\n temp_dict[variables[key]] = {}\n else:\n temp_dict[variables[key]] = []\n temp_dict = temp_dict[variables[key]]\n \n # Add the file information at the deepest layer\n new_entry = {}\n new_entry['file'] = str(Path(file_path).joinpath(f).absolute())\n if variables != None:\n for key, value in variables.items():\n new_entry[key] = value\n temp_dict.append(new_entry)\n\n for key in uvals.keys():\n uvals[key].sort()\n \n return file_ind, uvals", "def _prepare_polib_files(files_dict, filename, languages,\n locale_root, po_files_path, header):\n files_dict[filename] = {}\n for lang in languages:\n file_path = os.path.join(locale_root, lang, po_files_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n if header is not None:\n _write_header(os.path.join(file_path, filename), lang, header)\n\n files_dict[filename][lang] = polib.pofile(\n os.path.join(file_path, filename), encoding=\"UTF-8\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies batches of files from one directory to another. The file names to be copied must fit a mask with numbers (start and end).
def copy_files_mask(mask: str, start: int, end: int, from_dir: str, to_dir: str):
    fnames = [mask.format(i) for i in range(start, end)]
    for fname in fnames:
        src = os.path.join(from_dir, fname)
        dst = os.path.join(to_dir, fname)
        shutil.copyfile(src, dst)
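A hedged usage sketch: assuming the directory dictionary from the previous row and a source folder containing files named cat.0.jpg, cat.1.jpg, ..., this copies the first 1000 cat images into the training split. The mask string and the range are illustrative assumptions, not part of the row.

# Illustrative call; 'cat.{}.jpg' and the 0..1000 range are assumptions.
copy_files_mask('cat.{}.jpg', 0, 1000, dirs['original_train'], dirs['train_cats'])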
[ "def extend(source_filename, target_filename, batch_size=1000):\n\n with MBtiles(target_filename, \"r+\") as target, MBtiles(source_filename) as source:\n for batch in source.list_tiles_batched(batch_size):\n tiles_to_copy = [tile for tile in batch if not target.has_tile(*tile)]\n\n if tiles_to_copy:\n target.write_tiles(\n Tile(*tile, data=source.read_tile(*tile)) for tile in tiles_to_copy\n )", "def copy_images(source_list: list[str], source_path: str, destination_path: str):\n for image in source_list:\n shutil.copyfile(f'{source_path}{image}', f'./train/{destination_path}/{image}')", "def copy_image_files(img_ids, foldername):\n \n # Load all traffic related image ids:\n anns = load_anns(\"/Users/David/Repositories/cocoTraffic/annotations/21_coco_sub_all_traffic/\", \n 'instances_val2017Relabelled.json') \n \n # Filter for traffic lights:\n path = '../images/'\n count = 0\n\n for img in img_ids:\n filename = (str(img) + \".jpg\").zfill(16)\n\n try:\n copyfile(path+'val2017/'+filename, path+foldername+'/'+filename)\n count +=1\n except FileNotFoundError:\n print('Folder {} does not exist. Please create it and try again.'.format(path+foldername))\n return\n assert(count == len(img_ids))\n\n print('Copied {} images to {}'.format(count, path+foldername))", "def copy_files(source_dir, dest_dir, batch_file = 'run_DC_copy.bat', allow_dest_exist=False):\n\n if not platform.system() == 'Windows':\n raise OSError\n\n if not allow_dest_exist:\n if os.path.isdir(dest_dir):\n # Destination directory already exists\n print('''Destination directory exists. Rerun \n with --allow-overwrite flag to enable \n copying. Warning, this may cause overwriting \n of existing files.''')\n \n return -1\n\n else:\n os.mkdir(dest_dir)\n\n\n with open(batch_file, 'w') as bfile:\n\n bfile.write('ECHO OFF\\n')\n bfile.write('ROBOCOPY \"{}\" \"{}\" *.* /E /COPY:DT /DCOPY:DAT\\n'.format(source_dir, dest_dir))\n bfile.write('ATTRIB +R \"{}\"\\\\* /S'.format(dest_dir))\n\n try:\n os.system(batch_file)\n\n except:\n print('Batch file did not run correctly.')\n return -2\n\n finally:\n os.remove(batch_file)\n\n return 1", "def copy_test_images(source_list: list[str], input_path: str):\n for image in source_list:\n shutil.copyfile(f'{input_path}{image}', f'./test/1/{image}')", "def copy_dir(root_src_dir, root_dst_dir):\n\n for src_dir, dirs, files in os.walk(root_src_dir):\n dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)\n\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n for file_ in files:\n src_file = os.path.join(src_dir, file_)\n dst_file = os.path.join(dst_dir, file_)\n if os.path.exists(dst_file): \n # If we have .pbf's that clash, we want to merge them \n # together and write them to the new directory as dst_dir\n\n if file_ == \"metadata.json\":\n os.remove(dst_file)\n else:\n print \"\\n Merging tiles to... 
\" + dst_file\n\n with open(src_file, 'rb') as f:\n data = f.read()\n decoded_data1 = mapbox_vector_tile.decode(data)\n\n with open(dst_file, 'rb') as f:\n data = f.read()\n decoded_data2 = mapbox_vector_tile.decode(data)\n \n for k, v in decoded_data2.items():\n if k in decoded_data1:\n decoded_data1[k][\"features\"] += decoded_data2[k][\"features\"]\n else:\n decoded_data1[k] = decoded_data2[k] \n\n listofdict = []\n for k, v in decoded_data1.items():\n dic = {\n 'name': k,\n 'features': decoded_data1[k][\"features\"]\n }\n listofdict.append(dic)\n\n encoded_data = mapbox_vector_tile.encode(listofdict)\n with open(dst_file, 'w') as f:\n f.write(encoded_data)\n else: \n shutil.copy(src_file, dst_dir)", "def copyTiffImages(fullTIFImageDir,selectedTIFDir,finalBlackMaskDir):\n\tmaskPath = os.path.join(finalBlackMaskDir) + '*.png'\n\n\tif not (os.path.exists(selectedTIFDir)):\n\t\tos.mkdir(selectedTIFDir)\n\n\tfor maskFullPath in glob.glob( maskPath ):\n\t\timg = maskFullPath.replace('.png','.tif')\n\t\tname = img.replace(finalBlackMaskDir,fullTIFImageDir)\n\t\tprint(name)\n\t\tname_new = img.replace(finalBlackMaskDir,selectedTIFDir)\n\t\tshutil.copy(name,name_new)", "def test_with_binaryfile_copy(src,dest,fsize=100,ncopies=5):\n\n # Just handle all paths with linux style separators. This seems to work\n # in timeit. Passing in correctly formatted windows paths, didn't work.\n if hasattr(src,'__iter__'):\n for i,v in enumerate(src):\n if _sys.platform == \"win32\" and ('\\\\' in v):\n src[i] = repr(v)[1:-1].replace(\"\\\\\",\"/\").replace(\"//\",\"/\")\n else:\n if _sys.platform == \"win32\" and ('\\\\' in src):\n src = repr(src)[1:-1].replace(\"\\\\\",\"/\").replace(\"//\",\"/\")\n if hasattr(dest,'__iter__'):\n for i,v in enumerate(dest):\n if _sys.platform == \"win32\" and ('\\\\' in v):\n dest[i] = repr(v)[1:-1].replace(\"\\\\\",\"/\").replace(\"//\",\"/\")\n else:\n if _sys.platform == \"win32\" and ('\\\\' in dest):\n dest = repr(dest)[1:-1].replace(\"\\\\\",\"/\").replace(\"//\",\"/\")\n\n tf = \"temp.bin\"\n if isinstance(src,str):\n src = [_pp.join(*i) for i in _itertools.product([src],[tf])]\n elif hasattr(src,'__iter__'):\n src = [_pp.join(*i) for i in _itertools.product(src,[tf])]\n else:\n raise TypeError\n\n if isinstance(dest,str):\n dest = [_pp.join(*i) for i in _itertools.product([dest],[tf])]\n elif hasattr(src,'__iter__'):\n dest = [_pp.join(*i) for i in _itertools.product(dest,[tf])]\n else:\n raise TypeError\n\n ### Start looping through tests\n for enu,i in enumerate(src):\n # Make binary file\n print('')\n print(\"***** Source \"+str(enu+1)+\" of \" \\\n +str(len(src))+\" *****\")\n print(\"Creating a binary file to copy from at:\")\n print(i)\n with open(i,'wb') as fout:\n fout.write(_os.urandom(int(1024*1024*fsize)))\n\n for ind,j in enumerate(dest):\n\n print('')\n print(\"*** Copy destination \"+str(ind+1)+\" of \" \\\n +str(len(dest))+\" ***\")\n print(\"Destination location: %s\" % j)\n #print \"Source location : %s\" % i\n # copy to destination and record stats\n\n cmd = \"shutil.copy('\"+i+\"','\"+j+\"')\"\n #print i\n #print j\n #print cmd\n tvec = _timeit.repeat(cmd,setup=\"import shutil\",\n repeat=ncopies,number=1)\n\n print(\"Average copy time is: %s\" % _np.mean(tvec))\n print(\"Standard deviation of copy time is: %s\" % _np.std(tvec))\n\n print(\"Removing file %s\" % j)\n _os.remove(j)\n\n print(\"Removing file %s\" % i)\n _os.remove(i)", "def copyData(source, sink): \n \n (fileheader, fileext, digitfrmt) = splitFileExpression(sink)\n \n fp, fl = 
readFileList(source)\n \n for i in range(len(fl)):\n io.copyFile(os.path.join(fp, fl[i]), fileheader + (digitfrmt % i) + fileext)\n \n return sink", "def copy_samples():\n lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'front_view_lane_line_for_training')\n lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'top_view_lane_line_for_training')\n non_lane_line_fv_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'tmp')\n non_lane_line_top_src_dir = ops.join(DVCNN_TRAIN_DATASET_SRC_DIR, 'top_view_non_lane_line_for_training')\n\n lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/front_view')\n lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'lane_line/top_view')\n non_lane_line_fv_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/front_view')\n non_lane_line_top_dst_dir = ops.join(DVCNN_TRAIN_DATASET_DST_DIR, 'non_lane_line/top_view')\n\n for parents, _, filenames in os.walk(lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(lane_line_top_src_dir, filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying lane line samples {:d}/{:d} {:s}'.format(index+1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n for parents, _, filenames in os.walk(non_lane_line_fv_src_dir):\n for index, filename in enumerate(filenames):\n fv_src_filename = ops.join(parents, filename)\n top_src_filename = ops.join(non_lane_line_top_src_dir, filename.replace('fv', 'top'))\n\n fv_dst_filename = ops.join(non_lane_line_fv_dst_dir, filename)\n top_dst_filename = ops.join(non_lane_line_top_dst_dir, filename.replace('fv', 'top'))\n\n shutil.copyfile(src=fv_src_filename, dst=fv_dst_filename)\n shutil.copyfile(src=top_src_filename, dst=top_dst_filename)\n sys.stdout.write('\\r>>Copying non lane line samples {:d}/{:d} {:s}'.format(index + 1, len(filenames),\n filename[0:filename.find('.')]))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n return", "def copy_files(src_list, dest_list, *, b, nb, force_copy=False, n_proc=16):\n\n global s3c\n s3c = boto3.client(\"s3\")\n\n if not force_copy:\n existing_files = set(\n get_files(bucket=nb, prefix=os.path.commonprefix(dest_list))\n ) & set(dest_list)\n src_list, dest_list = zip(\n *[\n (src, dest)\n for src, dest in zip(src_list, dest_list)\n if dest not in existing_files\n ]\n )\n\n print(f\"copying {len(src_list)} files\")\n with ProcessPoolExecutor(max_workers=n_proc) as executor:\n list(\n executor.map(\n copy_file,\n itertools.repeat(b),\n itertools.repeat(nb),\n src_list,\n dest_list,\n chunksize=64,\n )\n )", "def run(self):\n rootdir = self.req_1.output().path\n newpath = self.output().path\n for src_dir, dirs, files in os.walk(rootdir):\n dst_dir = src_dir.replace(rootdir, newpath, 1)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n counter = 0\n for file_ in files:\n src_file = os.path.join(src_dir, file_)\n shutil.copy(src_file, dst_dir)\n counter += 1\n if \"train\" in dst_dir and counter > 50:\n break\n elif \"test\" in dst_dir and counter > 10:\n break", "def copy_files(files, dst_folder):\n for f in glob.glob(files):\n 
shutil.copy(f, dst_folder)", "def copy_rd(self):\r\n target_dir=os.path.join(self.LOCAL_DIR,'in_sim')\r\n self.app_logger.info('Copying rd files to {target_dir}'\\\r\n .format(target_dir=target_dir))\r\n rd_file_list=glob.glob(os.path.join(self.LOCAL_DIR,self.MASK))\r\n for file in rd_file_list:\r\n shutil.copy(file,target_dir)", "def copy_minidumps(self, target, start_ts):\n logging.info(\"Copying minidumps from %s to %s with ctime >= %s\"\n % (self.minidump_search_path, target, start_ts))\n for filename in glob.glob(os.path.join(self.minidump_search_path, \"*.dmp\")):\n try:\n minidump_ctime = self.get_minidump_create_timestamp(filename)\n if minidump_ctime >= math.floor(start_ts):\n shutil.copy2(filename, target)\n else:\n logging.info(\"Ignored mindump: %s ctime: %s\" % (filename, minidump_ctime))\n except Exception:\n logging.exception(\"Error processing minidump at path: %s. Skipping it.\" % filename)", "def combine(startno, endno, ratio, destdir='/data/sim-240cata-7G/',sourcedir='/data/sim-catalogs/', prefix='RA240_DEC10_sqd300-', suffix='.cat'):\n round = numpy.ceil(float(startno)/ratio)\n for catano in range(startno,endno,ratio): #start=1,end=86400,step=240\n combinednames = genCombingnames(catano,ratio,sourcedir,prefix,suffix)\n destname = genDestname(round, ratio, destdir, prefix, suffix)\n cmd = \"cat %s > %s\" %(combinednames, destname)\n os.system(cmd)\n round += 1\n print \"finish combine %d files into %s\" %(ratio, destname)", "def copy_step_files(source, destination):\n # Copy feature files to the feature directory\n for f in glob(join(source, '*.py')):\n shutil.copyfile(f, join(destination, basename(f)))", "def copy(source, destination, extension):\r\n show_progress_bar(total)\r\n for foldername, subfolders, filenames in os.walk(source):\r\n for filename in filenames:\r\n if filename.endswith(extension):\r\n time = datetime.now().strftime(\"%H:%M:%S\")\r\n if not os.path.exists(os.path.join(destination, filename)):\r\n if args.log:\r\n log.append(f'{time} {filename} from {foldername}')\r\n shutil.copy(os.path.join(foldername, filename), os.path.join(destination, filename))\r\n else:\r\n new_filename = f'{os.path.basename(foldername)}_{filename}'\r\n if args.log:\r\n log.append(f'{time} {filename} from {foldername} and saving it as {new_filename}')\r\n shutil.copy(os.path.join(foldername, filename), os.path.join(destination, new_filename))\r\n show_progress_bar(total, copied)", "def copying_files(patient, source):\n mainpath = \"/masvol/data/dsb/{0}/{1}/study\".format(source, patient)\n origpath = \"{0}/sax*/*\".format(mainpath)\n\n count = 0\n\n for i in glob.glob(origpath):\n if not i.endswith('.dcm'):\n continue\n\n print (i) # original file\n nodes = i.split('/')\n print (nodes)\n filename = nodes[-1]\n print (filename) # original filename without path\n filenodes = filename.split('-')\n if len(filenodes) != 4:\n continue\n\n sax = filenodes[-1].replace('.dcm','')\n\n newdir = \"{0}/sax_{1}\".format(mainpath, int(sax))\n print (newdir) # new sax folder\n newname = newdir + '/' + '-'.join(filenodes[:-1]) + '.dcm'\n print (newname) # new dcm filename\n\n newdirpath = os.path.dirname(newname)\n\n if not os.path.exists(newdirpath):\n os.makedirs(newdirpath)\n \n #os.rename(i, newname)\n os.popen(\"cp {0} {1}\".format(i, newname)) # copying original from old sax to the new sax folder\n count += 1\n\n #if count > 5:\n # break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms an ECEF position into an ECI position.
def ecef2eci(r_ecef, GMST):
    DCM = ROT3(-GMST)  # Rotation matrix
    r_eci = DCM.dot(r_ecef)
    return r_eci
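ROT3 is not included in the row; a common (Vallado-style) definition is a right-handed rotation about the z-axis, shown below as an assumption. With it, ROT3(-GMST) reproduces the hand-written GMST matrix used in the first negative for this row.

import numpy as np

def ROT3(theta):
    # Assumed Vallado-style rotation about the z-axis (3-axis).
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[  c,   s, 0.0],
                     [ -s,   c, 0.0],
                     [0.0, 0.0, 1.0]])

# Illustrative call with made-up values (km, rad):
r_eci = ecef2eci(np.array([7000.0, 0.0, 0.0]), GMST=1.0)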
[ "def ecef2eci(R_ECEF,time): \n #\n # T is the Julian Date in julian centuries\n #\n d = time - 2451545.0;\n T = d/ 36525;\n #\n # Compute Greenwich Mean sidereal Time (in hours)\n #\n GMST = 2*np.pi*(0.7790572732640 + 1.00273781191125448*d)\n # \n # Compute Rotation Matrix\n #\n R_ECI = np.zeros((3,np.size(T)))\n #\n for i in range(0,np.size(T)):\n RT = np.array([[+np.cos(GMST[i]), -np.sin(GMST[i]), 0], \n [ +np.sin(GMST[i]),+np.cos(GMST[i]), 0], \n [0 , 0 , 1 ]]);\n #\n R_ECI[:,i] = np.matmul(RT,R_ECEF.T)\n #\n \n return R_ECI", "def _propagate_only_position_ecef(self, timetuple):\n position_eci, _ = self.propagator.propagate(*timetuple)\n gmst = _gstime(jday(*timetuple))\n return coordinate_systems.eci_to_ecef(position_eci, gmst)", "def xyz(self):\n c_angle = self.c / self.peg.radius\n s_angle = self.s / self.peg.radius\n r = self.peg.radius + self.h\n # from spherical to cartesian\n xyz_local = array ([r * cos (c_angle) * cos (s_angle),\n r * cos (c_angle) * sin (s_angle),\n r * sin (c_angle)])\n # from local xyz to ECEF xyz\n xyz = self.peg.rotation_matrix.dot(xyz_local) + self.peg.translation_vector\n return XYZ(xyz[0], xyz[1], xyz[2], self.peg.ellipsoid)", "def geodetic2ecef(lon, lat, alt=0):\n lat = np.radians(lat)\n lon = np.radians(lon)\n xi = np.sqrt(1 - ESQ * np.sin(lat))\n x = (A / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (A / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * np.sin(lat)\n return x, y, z", "def toe_positions(self):\n torso_frame = self.data.xmat['torso'].reshape(3, 3)\n torso_pos = self.data.xpos['torso']\n torso_to_toe = self.data.xpos[_TOES] - torso_pos\n return torso_to_toe.dot(torso_frame)", "def geodetic2ecef(lat, lon, alt=0):\n\n lat, lon = math.radians(lat), math.radians(lon)\n xi = math.sqrt(1 - ESQ * math.sin(lat))\n x = (A / xi + alt) * math.cos(lat) * math.cos(lon)\n y = (A / xi + alt) * math.cos(lat) * math.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * math.sin(lat)\n return x, y, z", "def update_position(self, msg):\n\t\tself.ekf.pos = enu_to_ned(np.array([[msg.pose.position.x], [msg.pose.position.y], [msg.pose.position.z]]))", "def _propagate_ecef(self, when_utc):\n timetuple = (when_utc.year, when_utc.month, when_utc.day,\n when_utc.hour, when_utc.minute, when_utc.second + when_utc.microsecond * 1e-6)\n\n position_eci, velocity_eci = self.propagator.propagate(*timetuple)\n gmst = _gstime(jday(*timetuple))\n position_ecef = coordinate_systems.eci_to_ecef(position_eci, gmst)\n velocity_ecef = coordinate_systems.eci_to_ecef(velocity_eci, gmst)\n return (position_ecef, velocity_ecef)", "def _figure_coordinates(self, position):\n position = np.array(position)\n \n scaled = np.atleast_2d((position - self._origin) / self._resolution)\n # flip array in left-right direction\n return np.fliplr(scaled).astype(np.uint16).reshape(position.shape)", "def ecl_frame(self):\n if self._alt_az_frame is None or self._ecl_frame is None:\n self._ecl_frame = self.alt_az_frame.transform_to(\n astropy.coordinates.HeliocentricTrueEcliptic)\n return self._ecl_frame", "def geod2ecef(geod):\n if len(geod.shape) == 1:\n lat = geod[0]\n lon = geod[1]\n if len(geod) == 3:\n alt = geod[2]\n else:\n alt = 0.0\n else:\n lat = geod[:, 0]\n lon = geod[:, 1]\n if geod.shape[1] == 3:\n alt = geod[:, 2]\n else:\n alt = 0.0\n\n a = 6378137\n e = 8.1819190842622e-2\n N = a / np.sqrt(1 - e**2 * np.sin(lat)**2)\n\n x = (N + alt) * np.cos(lat) * np.cos(lon)\n y = (N + alt) * np.cos(lat) * np.sin(lon)\n z = ((1-e**2) * N + alt) * np.sin(lat)\n\n if len(geod.shape) == 
1:\n return np.array([x, y, z])\n else:\n return np.column_stack((x, y, z))", "def aces_cc_to_linear(value, **kwargs):\n\n return ACES_CC_EOCF(value)", "def _cie_rgb_EOCF(value):\n\n value = np.asarray(value)\n\n return value ** 2.2", "def geodetic_from_ecef(x, y, z):\n #http://code.google.com/p/pysatel/source/browse/trunk/coord.py?r=22\n\n # load wgs constants\n wgs = wgs_constants()\n a = wgs.a\n b = wgs.b\n esq = wgs.esq\n e1sq = wgs.e1sq\n \n r = sqrt(x * x + y * y)\n Esq = a * a - b * b\n F = 54 * b * b * z * z\n G = r * r + (1 - esq) * z * z - esq * Esq\n C = (esq * esq * F * r * r) / (pow(G, 3))\n S = cbrt(1 + C + sqrt(C * C + 2 * C))\n P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)\n Q = sqrt(1 + 2 * esq * esq * P)\n r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - \\\n P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)\n #U = sqrt(pow((r - esq * r_0), 2) + z * z)\n V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)\n Z_0 = b * b * z / (a * V)\n #h = U * (1 - b * b / (a * V))\n lat = arctan((z + e1sq * Z_0) / r)\n lon = arctan2(y, x)\n return lat, lon\n #return degrees(lat), degrees(lon)", "def pos_to_hgvspos(\n lvecpos,\n vec_corange_cloned,\n vec_corange_exons,\n cdna_corange_exons ):\n\n\n assert len(vec_corange_exons)==len(cdna_corange_exons), 'must be same # of exons in both parameters'\n\n for (corng_vec,corng_ex) in zip(vec_corange_exons,cdna_corange_exons):\n assert corng_ex[1]-corng_ex[0]+1 == corng_vec[1]-corng_vec[0]+1 , 'exons must be same lengths'\n assert corng_vec[0] >= vec_corange_cloned[0] and corng_vec[1] <= vec_corange_cloned[1], 'exon must be entirely within cloned region'\n\n loutpos=[]\n\n for vecpos in lvecpos:\n if vecpos < vec_corange_cloned[0] or vecpos > vec_corange_cloned[1]:\n coordstr=None\n else:\n # is there only one exon?\n if len( vec_corange_exons ) == 1:\n # is the location before the exon, after, or inside?\n if vecpos < vec_corange_exons[0][0]:\n coordstr = 'c.{:d}-{:d}'.format(\n cdna_corange_exons[0][0],\n vec_corange_exons[0][0]-vecpos )\n elif vecpos > vec_corange_exons[0][1]:\n coordstr = 'c.{:d}+{:d}'.format(\n cdna_corange_exons[0][1],\n vecpos-vec_corange_exons[0][1] )\n else:\n coordstr = 'c.{:d}'.format( cdna_corange_exons[0][0] + vecpos - vec_corange_exons[0][0] )\n else:\n # not handling multiple exons yet; if in an intron, we'd need to figure out which is nearer, then create the coordinate relative to that\n assert 1==0, 'not implemented yet'\n\n loutpos.append(coordstr)\n\n return loutpos", "def transformFromPosition(*args):\n return _almathswig.transformFromPosition(*args)", "def fen2efen(fen):\n\tpieces_pos, clr_to_move, castl_avl, en_pas_targ, half_mov_clk, mov_clk = read_fen(fen)\n\texp_piece_pos = expand_piece_pos(pieces_pos)\n\treturn exp_piece_pos+' '+clr_to_move+' '+castl_avl+' '+en_pas_targ+' '+half_mov_clk+' '+mov_clk", "def cvPosition(*args, **kwargs):\n \n pass", "def track(self):\n return np.arctan2(\n self.v_e,\n self.u_e,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes azimuth (angle from north, measured toward east), elevation, and range of the position r_ecef with respect to a reference position r_local.
def ecef2AzElRange(r_ecef, r_local, R_eq, e_planet):
    tol = 0.001 * np.pi/180.0  # Tolerance (0.001 deg)
    lla = ecef2lla(r_local, R_eq, e_planet, tol)  # Compute Latitude, Longitude, Altitude
    r_sez = ecef2sez(r_ecef, lla[0], lla[1], lla[2], R_eq, e_planet)
    azElRange = sez2AzElRange(r_sez)
    return azElRange
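The helpers ecef2lla, ecef2sez, and sez2AzElRange are not shown in this row. A hedged sketch of sez2AzElRange under the usual convention (azimuth from north measured toward east, elevation above the local horizon) could look like this:

import numpy as np

def sez2AzElRange(r_sez):
    # Assumes r_sez = [South, East, Zenith] components, consistent with the query.
    rng = np.linalg.norm(r_sez)
    el = np.arcsin(r_sez[2] / rng)                      # elevation above the local horizon
    az = np.arctan2(r_sez[1], -r_sez[0]) % (2 * np.pi)  # azimuth from north, positive toward east
    return np.array([az, el, rng])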
[ "def _az_alt( self, lat, lon, utc_offset, sun ):\n # Sun's Mean Longitude, L, gives us GMT at midnight.\n # GMST0 = (L + 180)/15\n GMST0 = ((sun.L+180)/15) % 24\n\n # Local Sidereal Time = GMST0 + UT + LON/15\n self.LST = GMST0 + utc_offset + lon/15\n\n # Hour Angle (in degrees) = 15*(LST - RA (in hours))\n self.HA = (15*(self.LST - self.RA/15)) % 360\n\n # celestial rectangular (x,y,z) coordinate\n x = math.cos(math.radians(self.HA)) * math.cos(math.radians(self.Decl))\n y = math.sin(math.radians(self.HA)) * math.cos(math.radians(self.Decl))\n z = math.sin(math.radians(self.Decl))\n\n # rotate this x,y,z system along the Y axis\n xhor = x*math.sin(math.radians(lat)) - z*math.cos(math.radians(lat))\n yhor = y\n zhor = x*math.cos(math.radians(lat)) + z*math.sin(math.radians(lat))\n\n self.azimuth = math.degrees( math.atan2( yhor, xhor ) ) + 180\n self.altitude = math.degrees( math.atan2( zhor, math.sqrt(xhor**2+yhor**2) ) )\n\n return self.azimuth, self.altitude", "def local_az_alt( self, lat, lon, utc_offset ):\n az, alt = self._az_alt( lat, lon, utc_offset, self.sun )\n self.az, self.alt_geoc = az, alt\n # Compute azimuthal coordinates by applying LST.\n # Topocentric vs. geocentric altitude adjustment\n mpar = math.degrees( math.asin( 1/self.r ) )\n alt_topoc = alt - mpar * math.cos(math.radians(alt))\n self.alt_topoc = alt_topoc\n return az, alt_topoc", "def AzEl_to_RaDec(azimuth,elevation,latitude,longitude,dateUTtime):\n year, doy = dateUTtime\n mjd = DT.MJD(year, doy)\n LST = greenwich_sidereal_time(*dateUTtime)-longitude/15.\n HA,dec = AzEl_to_HaDec(azimuth, elevation, latitude)\n RA = math.fmod(LST - HA, 24.)\n cirs_ra = obs_ra_to_cirs_ra(RA, mjd, longitude, latitude)\n if cirs_ra < 0:\n cirs_ra += 24.\n return cirs_ra,dec", "def calculate_desired_alaz(self):\n target = self.coordselector.currentText()\n\n # Convert from QString to String to not confuse ephem\n leftcoord = str(self.inputleftcoord.text())\n rightcoord= str(self.inputrightcoord.text())\n\t\n\t# Calculate offset values in desired unit\n offsetsys = self.coordselector_steps.currentText()\n # ASSUME HORIZONTAL FOR NOW\n # Read specified offset grid\n # Convert from QString to String to not confuse ephem,then to decimal degrees in case the string had XX:XX:XX.XX format.\n try:\n offset_left = float(ephem.degrees(str(self.offset_left.text())))*180.0/np.pi\n offset_right= float(ephem.degrees(str(self.offset_right.text())))*180.0/np.pi\n nsteps_left = max(float(self.nsteps_left.text()),1)\n nsteps_right= max(float(self.nsteps_right.text()),1)\n\t gs_left = offset_left*(nsteps_left-1)\n\t gs_right = offset_right*(nsteps_right-1)\n if gs_left ==0:\n offsets_left = np.array([0.0])\n else:\n\t offsets_left = np.linspace(-0.5*gs_left,0.5*gs_left,num=nsteps_left)\n if gs_right ==0:\n offsets_right = np.array([0.0])\n else:\n\t offsets_right = np.linspace(-0.5*gs_right,0.5*gs_right,num=nsteps_right)\n except ValueError, IndexError:\n offsets_left = np.array([0.0])\n offsets_right = np.array([0.0])\n \n # Reset values in case they are \"The Sun\"\n # since otherwise errors appear when switchin from \"The Sun\"-mode \n # to something else\n try:\n ephem.degrees(leftcoord)\n ephem.degrees(rightcoord)\n except ValueError:\n leftcoord = 0.0\n rightcoord = 0.0\n\n self.telescope.site.date = ephem.now()\n if target == 'Horizontal':\n alt = ephem.degrees(leftcoord)\n az = ephem.degrees(rightcoord)\n # Make sure azimut is given in interval 0 to 360 degrees.\n #az = (float(rightcoord) %360.0)* np.pi/180.0\n # Save as targetpos, will be 
minor offset because of radec_of conversion\n # Note reverse order of az, alt in this radec_of-function.\n #(ra, dec) = self.telescope.site.radec_of(az, alt)\n #pos = ephem.FixedBody()\n #pos._ra = ra\n #pos._dec = dec\n #pos._epoch = self.telescope.site.date\n #pos.compute(self.telescope.site)\n # Do not set position to tracking target in this case, because of radec_of discrepancy.\n # Instead set to given values manually\n alt_deg = float(alt)*180.0/np.pi\n az_deg = float(az)*180.0/np.pi\n \n elif target == 'Stow':\n # Read stow position from file\n (alt_deg,az_deg)=self.telescope.get_stow_alaz()\n\n else:\n # If given system is something else, we do not have to use radec_of and we get\n # http://stackoverflow.com/questions/11169523/how-to-compute-alt-az-for-given-galactic-coordinate-glon-glat-with-pyephem\n if target == 'The Sun':\n pos = ephem.Sun()\n pos.compute(self.telescope.site) # Needed for the sun since depending on time\n elif target == 'The Moon':\n pos = ephem.Moon()\n pos.compute(self.telescope.site) # Needed for the moon since depending on time\n elif target == 'Cas. A':\n pos = ephem.Equatorial(ephem.hours('23:23:26'), ephem.degrees('58:48:0'), epoch=ephem.J2000)\n # Coordinate from http://en.wikipedia.org/wiki/Cassiopeia_A\n elif target == 'Galactic':\n pos = ephem.Galactic(ephem.degrees(leftcoord), ephem.degrees(rightcoord))\n elif target == 'Eq. J2000':\n pos = ephem.Equatorial(ephem.hours(leftcoord), ephem.degrees(rightcoord), epoch=ephem.J2000)\n elif target == 'Eq. B1950':\n pos = ephem.Equatorial(ephem.hours(leftcoord), ephem.degrees(rightcoord), epoch=ephem.B1950)\n elif target == 'Ecliptic':\n pos = ephem.Ecliptic(ephem.degrees(leftcoord), ephem.degrees(rightcoord)) # Use some epoch?\n #pos = ephem.Ecliptic(ephem.degrees(leftcoord), ephem.degrees(rightcoord), epoch=ephem.J2000)\n # Calculate alt, az, via fixedbody since only fixed body has alt, az\n # First needs to make sure we have equatorial coordinates\n eqpos = ephem.Equatorial(pos)\n fixedbody = ephem.FixedBody()\n fixedbody._ra = eqpos.ra\n fixedbody._dec = eqpos.dec\n fixedbody._epoch = eqpos.epoch\n fixedbody.compute(self.telescope.site)\n alt = fixedbody.alt\n az = fixedbody.az\n alt_deg = float(alt)*180.0/np.pi\n az_deg = float(az)*180.0/np.pi\n\n # If horizontal offset, and if az-scale checkbox selected,\n # then scale current az offset value with cos(alt)\n if self.scale_az_offset.isChecked() and offsetsys == 'Horizontal':\n offsets_right *= (1.0/np.cos((alt_deg+offsets_left[self.leftiter])*(np.pi/180.0)))\n\n #TODO: Implement non-horizontal offset\n checklpos = alt_deg + offsets_left\n checkrpos = az_deg + offsets_right\n\n if self.allow_flip.isChecked():\n flipleft = [180-lp for lp in checklpos]\n flipright = [(rp+180)%360 for rp in checkrpos]\n \n # Check if directions are reachable\n can_reach_all = True\n can_flipreach_all = True\n for i in range(len(checklpos)):\n for j in range(len(checkrpos)):\n # Check if the desired direction is best reached via simple alt, az\n # or at 180-alt, az+180.\n reach = self.telescope.can_reach(checklpos[i], checkrpos[j]) \n flipreach = self.telescope.can_reach(flipleft[i], flipright[j])\n if not reach:\n can_reach_all = False\n if not flipreach:\n can_flipreach_all = False\n\n # If flip direction cannot be reached, return original one.\n # (even if this one may not be reached)\n if not can_flipreach_all:\n leftpos = checklpos\n rightpos = checkrpos\n\n # But, if flip direction can be reached, but not original one,\n # then we have to go to flipdirection to point 
to this position\n # E.g. in mecanically forbidden azimuth range\n elif can_flipreach_all and (not can_reach_all):\n leftpos = flipleft\n rightpos = flipright\n # If both directions are valid, which is the most common case,\n # then we find the closest one (in azimuth driving, not in angular distance)\n # to the current pointing\n elif can_flipreach_all and can_reach_all: \n (calt_deg, caz_deg) = self.telescope.get_current_alaz()\n flipd = self.telescope.get_azimuth_distance(caz_deg, flipright[self.rightiter])\n noflipd = self.telescope.get_azimuth_distance(caz_deg, checkrpos[self.rightiter])\n if flipd<noflipd:\n # Flip is closer, so do it\n leftpos = flipleft\n rightpos = flipright\n else:\n # No flip is closer, so don't flip\n leftpos = checklpos\n rightpos = checkrpos\n else:\n leftpos = checklpos\n rightpos = checkrpos\n # Update coordinates\n self.leftpos = leftpos\n self.rightpos = rightpos\n return (self.leftpos[self.leftiter], self.rightpos[self.rightiter])", "def elevation(lat, lon, utc, method=\"ASHRAE\", interval=None, h=None):\n\n # Calculate solar coefficients at UTC\n sinDec, cosDec, eqnOfTime, solFactor = orbit(utc, method=method)\n\n # Calculate extraterrestrial radiance at UTC\n E0 = solFactor * total_solar_irradiance(utc, method=method)\n\n # Latitudinal sines\n sinLat = np.sin(np.radians(lat))\n cosLat = np.cos(np.radians(lat))\n\n def int_elevation(h):\n \"\"\"\n Instant elevation at hour angle h\n \"\"\"\n return np.maximum(sinDec * sinLat + cosDec * cosLat * np.cos(h), 0)\n\n def avg_elevation(h1, h2):\n \"\"\"\n Integrated elevation between h1 and h2\n \"\"\"\n return np.maximum(\n sinLat * sinDec * (h2 - h1) + cosLat * cosDec * (np.sin(h2) - np.sin(h1)), 0\n )\n\n # Default interval is instantaneous\n if interval is None:\n interval = \"instant\"\n\n interval = interval.lower()[0]\n\n # Determine elevation\n if interval == \"i\":\n \"\"\"\n Instantaneous\n \"\"\"\n # Instantaneous hour angle\n if h is None:\n h = hour_angle(lon, utc, eqnOfTime)\n # Instantaneous elevation\n z = int_elevation(h)\n\n elif interval == \"m\":\n \"\"\"\n Instantaneous mid-point of previous hour, i.e. 
approximate average\n \"\"\"\n # Instantaneous hour angle at 30 minutes prior\n h = hour_angle(lon, utc - np.timedelta64(30, \"m\"), eqnOfTime)\n # Instantaneous elevation\n z = int_elevation(h)\n\n elif interval == \"h\":\n \"\"\"\n Hourly\n \"\"\"\n # Sunset hour angle\n h0 = np.arccos(np.clip(-sinDec / cosDec * sinLat / cosLat, -1, 1))\n # One hour (radians)\n dh = np.pi / 12\n # Start and end hour angles\n h = hour_angle(lon, utc, eqnOfTime)\n a = (h - dh + np.pi) % (2 * np.pi) - np.pi\n b = a + dh\n # Default elevation is zero\n z = np.zeros_like(h)\n # Conditions\n a1 = a < -h0\n a2 = (a >= -h0) & (a < h0)\n # b1 = (b < -h0)\n b2 = (b >= -h0) & (b < h0)\n b3 = b >= h0\n # Dawn\n np.copyto(z, avg_elevation(-h0, b), where=a1 & b2)\n # Comes up very briefly between a & b\n np.copyto(z, avg_elevation(-h0, h0), where=a1 & b3)\n # Sun's up\n np.copyto(z, avg_elevation(a, b), where=a2 & b2)\n # Dusk\n np.copyto(z, avg_elevation(a, h0), where=a2 & b3)\n # Scale by interval\n z /= dh\n\n elif interval == \"d\":\n \"\"\"\n Daily\n \"\"\"\n # Sunset hour angle\n h = np.arccos(np.clip(-sinDec / cosDec * sinLat / cosLat, -1, 1))\n # Average daily elevation\n z = avg_elevation(-h, h)\n # Scale by 24-hour interval\n z /= 2 * np.pi\n\n else:\n raise ValueError(\n \"Interval must be one of 'instant', 'midpoint', \" \"'hourly', or 'daily'\"\n )\n\n return z, E0", "def ecl_lon(self):\n return (self.ecl_frame.lon - self._sun_radec.ra).to(u.deg).value", "def azimuth_range(self):\n if self._azimuth_range is None:\n startaz = self._obj.attrs[\"startazA\"]\n stopaz = self._obj.attrs[\"stopazA\"]\n zero_index = np.where(stopaz < startaz)\n stopaz[zero_index[0]] += 360\n azimuth_data = (startaz + stopaz) / 2.0\n da = xr.DataArray(azimuth_data, attrs=az_attrs_template.copy())\n self._azimuth_range = da\n return self._azimuth_range", "def get_azimuth(self):\n co = self.get_car()\n return np.arctan2(co[0], co[1])", "def orbit(self, azim, elev):\n self.camera_azimuth += azim\n #self.opts['elevation'] += elev\n self.camera_elevation = np.clip(self.camera_elevation + elev, -90, 90)\n self.update()", "def calc_irradiance_module_sky_diffuse(self):\n\n vectors_front = np.multiply.outer(self.L - self.l_array, self.e_m) - np.array(\n [self.dist, 0]\n )\n\n vectors_front_normalized = vectors_front / np.linalg.norm(vectors_front, axis=1)[:,None]\n \n cos_alpha_2 = np.dot(vectors_front, self.n_m) / np.linalg.norm(\n vectors_front, axis=1\n )\n \n \n# =============================================================================\n# self.tmp[\"alpha_2_front\"] = np.arccos(cos_alpha_2)\n# =============================================================================\n \n alpha_2_front = np.arctan2(np.cross(self.n_m, vectors_front_normalized),\n np.dot(vectors_front_normalized, self.n_m))\n\n self.tmp[\"alpha_2_front\"] = alpha_2_front\n \n #np.arctan2(np.cross(vectors_front_normalized, self.n_m),\n # np.dot(vectors_front_normalized, self.n_m))\n \n \n spacing_alpha = np.linspace(-np.pi / 2, np.pi / 2, 1200)\n dist_alpha = np.cos(spacing_alpha)\n\n selector = np.greater.outer(spacing_alpha, self.tmp[\"alpha_2_front\"]).T\n dist_alpha = np.tile(dist_alpha, (self.module_steps, 1))\n dist_alpha[selector] = 0\n\n np.trapz(dist_alpha, np.tile(spacing_alpha, (self.module_steps, 1)), axis=1) / 2\n\n #sin_alpha_2 = (1 - cos_alpha_2 ** 2) ** 0.5\n \n irradiance_front = (np.sin(alpha_2_front) + 1) / 2.0\n\n vectors_back = np.multiply.outer(self.L - self.l_array, self.e_m) + np.array(\n [self.dist, 0]\n )\n cos_epsilon_1 = 
np.dot(vectors_back, -self.n_m) / norm(vectors_back, axis=1)\n\n self.tmp[\"epsilon_1_back\"] = np.pi / 2 - np.arccos(cos_epsilon_1)\n\n sin_epsilon_2 = (1 - cos_epsilon_1 ** 2) ** 0.5\n irradiance_back = (1 - sin_epsilon_2) / 2\n\n self.results[\"irradiance_module_front_sky_diffuse\"] = irradiance_front\n self.results[\n \"irradiance_module_front_sky_diffuse_mean\"\n ] = irradiance_front.mean()\n\n self.results[\"irradiance_module_back_sky_diffuse\"] = irradiance_back\n self.results[\"irradiance_module_back_sky_diffuse_mean\"] = irradiance_back.mean()", "def azimuth_angle(self):\n\t\tdiv = math.cos(math.radians(self.declination_angle())) * (math.sin(math.radians(self.hour_angle())) / math.cos(math.radians(self.altitude_angle())))\n\t\treturn math.degrees(math.asin(div))", "def local_hour_angle(model_time, longitude, right_ascension):\n return local_mean_sidereal_time(model_time, longitude) - right_ascension", "def to_earth_location(self):\n # in astropy, x points north, y points east, so we need a minus for y.\n cart = CartesianRepresentation(self.x, -self.y, self.z)\n altaz = AltAz(cart, location=self.reference_location)\n return _altaz_to_earthlocation(altaz)", "def get_relative_coordinates(self):\n altitude_feet = self.geo_altitude * 3.28084\n c_radius_of_earth = 6371\n c_feet_to_km = 0.0003048\n\n f1 = math.radians(home_latitude)\n f2 = math.radians(self.latitude)\n delta_f = math.radians(self.latitude - home_latitude)\n delta_g = math.radians(self.longitude - home_longitude)\n a = math.sin(delta_f / 2) * math.sin(delta_f / 2) + math.cos(f1) * math.cos(f2) * math.sin(\n delta_g / 2) * math.sin(\n delta_g / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n self.distance_km = round(c_radius_of_earth * c)\n\n bearing_radians = math.atan2(\n math.sin(self.longitude - home_longitude) * math.cos(self.latitude)\n , math.cos(home_latitude) * math.sin(self.latitude) - math.sin(home_latitude) * math.cos(self.latitude)\n * math.cos(self.longitude - home_longitude)\n )\n\n self.relative_azimuth_degrees = round((360.0 - math.degrees(bearing_radians)) % 360.0)\n self.angle_altitude_degrees = round(\n math.degrees(math.atan(altitude_feet * c_feet_to_km / self.distance_km))) # returns azimuth.", "def orbit(self, azim, elev):\n self.opts['azimuth'] += azim\n self.opts['elevation'] += elev\n #self.opts['elevation'] = np.clip(self.opts['elevation'] + elev, -90, 90)\n self.update()", "def RaDec_to_AzEl(RA, dec, latitude, longitude, dateUTtime):\n year, doy = dateUTtime\n mjd = DT.MJD(year, doy)\n cirs_ra = cirs_ra_to_obs_ra(RA, mjd, longitude, latitude)\n LST = greenwich_sidereal_time(*dateUTtime)-longitude/15.\n HourAngle = LST - cirs_ra\n if HourAngle < -12:\n HourAngle += 24.\n az, el = HaDec_to_AzEl(HourAngle, dec, latitude)\n return az, el", "def arc_to_euler(vector0, vector1, axes=XYZ):\n \n vector0 = _setDimension(vector0,2)\n vector1 = _setDimension(vector1,2)\n axes = _setDimension(axes,1,dtype=np.int32)\n \n vector0, vector1, axes = _matchDepth(vector0, vector1, axes)\n \n return quaternionToEuler(_vectorArcToQuaternion(vector0,vector1), axes)", "def altitude_angle(self):\n\t\ta = math.sin(math.radians(self.latitude)) * math.sin(math.radians(self.declination_angle()))\n\t\tb = math.cos(math.radians(self.latitude)) * math.cos(math.radians(self.declination_angle())) * math.cos(math.radians(self.hour_angle()))\n\t\tc = a+b\n\t\td = math.asin(c)\n\t\treturn math.degrees(d) #units = degress", "def CoordinateCalculator(CurrentLatitude,CurrentLongitude,TargetLatitude,TargetLongitude):\n \n r 
= EarthRadius #(m)\n Phi1 = CurrentLatitude * np.pi / 180 #(Rad)\n Lambda1 = CurrentLongitude * np.pi / 180 #(Rad)\n Phi2 = TargetLatitude * np.pi / 180 #(Rad)\n Lambda2 = TargetLongitude * np.pi / 180 #(Rad)\n \n if -180 <= Lambda2 - Lambda1 <= 180: Lambda12 = Lambda2 - Lambda1 #(Rad)\n if Lambda2 - Lambda1 > 180: Lambda12 = (Lambda2 - Lambda1) - 2 * np.pi #(Rad)\n if Lambda2 - Lambda1 < -180: Lambda12 = (Lambda2 - Lambda1) + 2 * np.pi #(Rad)\n \n Alpha1 = np.arctan2(np.array(np.sin(Lambda12)),np.array(np.cos(Phi1) * np.tan(Phi2) - np.sin(Phi1) * np.cos(Lambda12))) #(Rad)\n Alpha2 = np.arctan2(np.array(np.sin(Lambda12)),np.array(-np.cos(Phi2) * np.tan(Phi1) + np.sin(Phi2) * np.cos(Lambda12))) #(Rad)\n DeltaTheta12 = np.arccos((np.sin(Phi1) * np.sin(Phi2) + np.cos(Phi1) * np.cos(Phi2) * np.cos(Lambda12))) #(Rad)\n ArcLength = DeltaTheta12 * r #(m)\n Alphao = np.arcsin(np.sin(Alpha1) * np.cos(Phi1)) #(Rad)\n DeltaSigma01 = np.arctan2(np.array(np.tan(Phi1)),np.array(np.cos(Alpha1))) #(Rad)\n DeltaSigma02 = DeltaSigma01 + DeltaTheta12 #(Rad)\n Lambda01 = np.arctan2(np.array(np.sin(Alphao) * np.sin(DeltaSigma01)),np.array(np.cos(DeltaSigma01))) #(Rad)\n Lambdao = Lambda1 - Lambda01 #(Rad)\n LatList = []\n LatList1 = []\n LatList2 = []\n LatList3 = []\n LongList = []\n LongList1 = []\n LongList2 = []\n LongList3 = []\n for i in range(101):\n Sigma = DeltaSigma01 + (i * (DeltaSigma02 - DeltaSigma01))/100 #(Rad)\n Phi = (np.arcsin(np.cos(Alphao) * np.sin(Sigma)) * 180 / np.pi) #(Degrees)\n Lambda = (Lambdao + np.arctan2(np.array(np.sin(Alphao) * np.sin(Sigma)),np.array(np.cos(Sigma)))) * 180 / np.pi #(Degrees)\n if -180 <= Lambda <= 180:\n LongList1.append(Lambda) #(Degrees)\n LatList1.append(Phi) #(Degrees)\n if Lambda > 180:\n LongList2.append(Lambda - 360) #(Degrees)\n LatList2.append(Phi) #(Degrees)\n if Lambda < -180:\n LongList3.append(Lambda + 360) #(Degrees)\n LatList3.append(Phi) #(Degrees)\n\n im = plt.imread('EarthCordiants.jpg')\n im = plt.imshow(im, interpolation='bilinear', cmap=cm.gray, origin='lower', extent=[-180,180,-90,90])\n plt.xlabel('Longitude')\n plt.ylabel('Latitude')\n plt.title('Path of Rocket')\n plt.plot(LongList1, LatList1, 'r')\n plt.plot(LongList2, LatList2, 'r')\n plt.plot(LongList3, LatList3, 'r')\n plt.show()\n \n LatList.append(LatList1)\n LatList.append(LatList2)\n LatList.append(LatList3)\n LongList.append(LongList1)\n LongList.append(LongList2)\n LongList.append(LongList3)\n \n return LatList, LongList, Alpha1 * 180 / np.pi, ArcLength", "def geodetic2ecef(lon, lat, alt=0):\n lat = np.radians(lat)\n lon = np.radians(lon)\n xi = np.sqrt(1 - ESQ * np.sin(lat))\n x = (A / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (A / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * np.sin(lat)\n return x, y, z" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms an ECEF position into SEZ (South, East, Zenith) coordinates using the LLA of a reference position and an ellipsoid model for the planet.
def ecef2sez(r_ecef, latitude, longitude, altitude, R_eq, e_planet):
    r_site = lla2ecef(latitude, longitude, altitude, R_eq, e_planet)
    r_sez = ROT2(np.pi/2 - latitude).dot(ROT3(longitude)).dot(r_ecef - r_site)
    return r_sez
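ROT2 and lla2ecef are likewise not included in the row. Under the same assumed Vallado-style convention as ROT3 above, ROT2 is a rotation about the y-axis; applying ROT2(pi/2 - latitude) after ROT3(longitude) to the site-relative vector is the standard rotation from ECEF axes into the topocentric SEZ frame.

import numpy as np

def ROT2(theta):
    # Assumed Vallado-style rotation about the y-axis (2-axis).
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[  c, 0.0,  -s],
                     [0.0, 1.0, 0.0],
                     [  s, 0.0,   c]])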
[ "def geodetic2ecef(lon, lat, alt=0):\n lat = np.radians(lat)\n lon = np.radians(lon)\n xi = np.sqrt(1 - ESQ * np.sin(lat))\n x = (A / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (A / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * np.sin(lat)\n return x, y, z", "def geodetic2ecef(lat, lon, alt=0):\n\n lat, lon = math.radians(lat), math.radians(lon)\n xi = math.sqrt(1 - ESQ * math.sin(lat))\n x = (A / xi + alt) * math.cos(lat) * math.cos(lon)\n y = (A / xi + alt) * math.cos(lat) * math.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * math.sin(lat)\n return x, y, z", "def xyz(self):\n c_angle = self.c / self.peg.radius\n s_angle = self.s / self.peg.radius\n r = self.peg.radius + self.h\n # from spherical to cartesian\n xyz_local = array ([r * cos (c_angle) * cos (s_angle),\n r * cos (c_angle) * sin (s_angle),\n r * sin (c_angle)])\n # from local xyz to ECEF xyz\n xyz = self.peg.rotation_matrix.dot(xyz_local) + self.peg.translation_vector\n return XYZ(xyz[0], xyz[1], xyz[2], self.peg.ellipsoid)", "def to_earth_location(self):\n # in astropy, x points north, y points east, so we need a minus for y.\n cart = CartesianRepresentation(self.x, -self.y, self.z)\n altaz = AltAz(cart, location=self.reference_location)\n return _altaz_to_earthlocation(altaz)", "def geodetic_from_ecef(x, y, z):\n #http://code.google.com/p/pysatel/source/browse/trunk/coord.py?r=22\n\n # load wgs constants\n wgs = wgs_constants()\n a = wgs.a\n b = wgs.b\n esq = wgs.esq\n e1sq = wgs.e1sq\n \n r = sqrt(x * x + y * y)\n Esq = a * a - b * b\n F = 54 * b * b * z * z\n G = r * r + (1 - esq) * z * z - esq * Esq\n C = (esq * esq * F * r * r) / (pow(G, 3))\n S = cbrt(1 + C + sqrt(C * C + 2 * C))\n P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)\n Q = sqrt(1 + 2 * esq * esq * P)\n r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - \\\n P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)\n #U = sqrt(pow((r - esq * r_0), 2) + z * z)\n V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)\n Z_0 = b * b * z / (a * V)\n #h = U * (1 - b * b / (a * V))\n lat = arctan((z + e1sq * Z_0) / r)\n lon = arctan2(y, x)\n return lat, lon\n #return degrees(lat), degrees(lon)", "def ZCAM_to_XYZ(\n specification: CAM_Specification_ZCAM,\n XYZ_w: ArrayLike,\n L_A: ArrayLike,\n Y_b: ArrayLike,\n surround: InductionFactors_ZCAM = VIEWING_CONDITIONS_ZCAM[\"Average\"],\n discount_illuminant: bool = False,\n) -> NDArrayFloat:\n\n J_z, C_z, h_z, _S_z, _Q_z, M_z, _H, _H_Z, _V_z, _K_z, _W_z = astuple(\n specification\n )\n\n J_z = to_domain_1(J_z)\n C_z = to_domain_1(C_z)\n h_z = to_domain_degrees(h_z)\n M_z = to_domain_1(M_z)\n\n XYZ_w = to_domain_1(XYZ_w)\n _X_w, Y_w, _Z_w = tsplit(XYZ_w)\n L_A = as_float_array(L_A)\n Y_b = as_float_array(Y_b)\n\n F_s, F, c, N_c = surround\n\n # Step 0 (Forward) - Chromatic adaptation from reference illuminant to\n # \"CIE Standard Illuminant D65\" illuminant using \"CAT02\".\n # Computing degree of adaptation :math:`D`.\n D = (\n degree_of_adaptation(F, L_A)\n if not discount_illuminant\n else ones(L_A.shape)\n )\n\n # Step 1 (Forward) - Computing factors related with viewing conditions and\n # independent of the test stimulus.\n # Background factor :math:`F_b`\n F_b = np.sqrt(Y_b / Y_w)\n # Luminance level adaptation factor :math:`F_L`\n F_L = 0.171 * spow(L_A, 1 / 3) * (1 - np.exp(-48 / 9 * L_A))\n\n # Step 2 (Forward) - Computing achromatic response (:math:`I_{z,w}`),\n # redness-greenness (:math:`a_{z,w}`), and yellowness-blueness\n # (:math:`b_{z,w}`).\n with 
domain_range_scale(\"ignore\"):\n I_z_w, _A_z_w, _B_z_w = tsplit(\n XYZ_to_Izazbz(XYZ_w, method=\"Safdar 2021\")\n )\n\n # Step 1 (Inverse) - Computing achromatic response (:math:`I_z`).\n Q_z_p = (1.6 * F_s) / spow(F_b, 0.12)\n Q_z_m = spow(F_s, 2.2) * spow(F_b, 0.5) * spow(F_L, 0.2)\n Q_z_w = 2700 * spow(I_z_w, Q_z_p) * Q_z_m\n\n I_z_p = spow(F_b, 0.12) / (1.6 * F_s)\n I_z_d = 2700 * 100 * Q_z_m\n\n I_z = spow((J_z * Q_z_w) / I_z_d, I_z_p)\n\n # Step 2 (Inverse) - Computing chroma :math:`C_z`.\n if has_only_nan(M_z) and not has_only_nan(C_z):\n M_z = (C_z * Q_z_w) / 100\n elif has_only_nan(M_z):\n raise ValueError(\n 'Either \"C\" or \"M\" correlate must be defined in '\n 'the \"CAM_Specification_ZCAM\" argument!'\n )\n\n # Step 3 (Inverse) - Computing hue angle :math:`h_z`\n # :math:`h_z` is currently required as an input.\n\n # Computing eccentricity factor :math:`e_z`.\n e_z = 1.015 + np.cos(np.radians(89.038 + h_z % 360))\n h_z_r = np.radians(h_z)\n\n # Step 4 (Inverse) - Computing redness-greenness (:math:`a_z`), and\n # yellowness-blueness (:math:`b_z`).\n # C_z_p_e = 1.3514\n C_z_p_e = 50 / 37\n C_z_p = spow(\n (M_z * spow(I_z_w, 0.78) * spow(F_b, 0.1))\n / (100 * spow(e_z, 0.068) * spow(F_L, 0.2)),\n C_z_p_e,\n )\n a_z = C_z_p * np.cos(h_z_r)\n b_z = C_z_p * np.sin(h_z_r)\n\n # Step 5 (Inverse) - Computing tristimulus values :math:`XYZ_{D65}`.\n with domain_range_scale(\"ignore\"):\n XYZ_D65 = Izazbz_to_XYZ(tstack([I_z, a_z, b_z]), method=\"Safdar 2021\")\n\n XYZ = chromatic_adaptation_Zhai2018(\n XYZ_D65, TVS_D65, XYZ_w, D, D, transform=\"CAT02\"\n )\n\n return from_range_1(XYZ)", "def geod2ecef(geod):\n if len(geod.shape) == 1:\n lat = geod[0]\n lon = geod[1]\n if len(geod) == 3:\n alt = geod[2]\n else:\n alt = 0.0\n else:\n lat = geod[:, 0]\n lon = geod[:, 1]\n if geod.shape[1] == 3:\n alt = geod[:, 2]\n else:\n alt = 0.0\n\n a = 6378137\n e = 8.1819190842622e-2\n N = a / np.sqrt(1 - e**2 * np.sin(lat)**2)\n\n x = (N + alt) * np.cos(lat) * np.cos(lon)\n y = (N + alt) * np.cos(lat) * np.sin(lon)\n z = ((1-e**2) * N + alt) * np.sin(lat)\n\n if len(geod.shape) == 1:\n return np.array([x, y, z])\n else:\n return np.column_stack((x, y, z))", "def _celestial(self):\n cos = np.cos(self.lat)\n sin = np.sin(self.lat)\n transfo = np.matrix([ \n [0, -sin, cos],\n [1, 0, 0],\n [0, cos, sin]\n ])\n return transfo", "def wgs84_to_lv03(latitude, longitude, altitude=None):\n lat_sex = dms_to_sex(*dd_to_dms(dd=latitude))\n lon_sex = dms_to_sex(*dd_to_dms(dd=longitude))\n\n # Axiliary values\n lat_aux = (lat_sex - 169028.66) / 10000\n lon_aux = (lon_sex - 26782.5) / 10000\n\n east = \\\n 600072.37 \\\n + 211455.93 * lon_aux \\\n - 10938.51 * lon_aux * lat_aux \\\n - 0.36 * lon_aux * lat_aux ** 2 \\\n - 44.54 * lon_aux ** 3\n\n north = \\\n 200147.07 \\\n + 308807.95 * lat_aux \\\n + 3745.25 * lon_aux ** 2 \\\n + 76.63 * lat_aux ** 2 \\\n - 194.56 * lon_aux ** 2 * lat_aux \\\n + 119.79 * lat_aux ** 3\n\n height = None if altitude is None else \\\n altitude \\\n - 49.55 \\\n + 2.73 * lon_aux \\\n + 6.94 * lat_aux\n\n return east, north, height", "def ecl_frame(self):\n if self._alt_az_frame is None or self._ecl_frame is None:\n self._ecl_frame = self.alt_az_frame.transform_to(\n astropy.coordinates.HeliocentricTrueEcliptic)\n return self._ecl_frame", "def XYZ_to_ZCAM(\n XYZ: ArrayLike,\n XYZ_w: ArrayLike,\n L_A: ArrayLike,\n Y_b: ArrayLike,\n surround: InductionFactors_ZCAM = VIEWING_CONDITIONS_ZCAM[\"Average\"],\n discount_illuminant: bool = False,\n compute_H: bool = True,\n) -> 
CAM_Specification_ZCAM:\n\n XYZ = to_domain_1(XYZ)\n XYZ_w = to_domain_1(XYZ_w)\n _X_w, Y_w, _Z_w = tsplit(XYZ_w)\n L_A = as_float_array(L_A)\n Y_b = as_float_array(Y_b)\n\n F_s, F, _c, _N_c = surround\n\n # Step 0 (Forward) - Chromatic adaptation from reference illuminant to\n # \"CIE Standard Illuminant D65\" illuminant using \"CAT02\".\n # Computing degree of adaptation :math:`D`.\n D = (\n degree_of_adaptation(F, L_A)\n if not discount_illuminant\n else ones(L_A.shape)\n )\n\n XYZ_D65 = chromatic_adaptation_Zhai2018(\n XYZ, XYZ_w, TVS_D65, D, D, transform=\"CAT02\"\n )\n\n # Step 1 (Forward) - Computing factors related with viewing conditions and\n # independent of the test stimulus.\n # Background factor :math:`F_b`\n F_b = np.sqrt(Y_b / Y_w)\n # Luminance level adaptation factor :math:`F_L`\n F_L = 0.171 * spow(L_A, 1 / 3) * (1 - np.exp(-48 / 9 * L_A))\n\n # Step 2 (Forward) - Computing achromatic response (:math:`I_z` and\n # :math:`I_{z,w}`), redness-greenness (:math:`a_z` and :math:`a_{z,w}`),\n # and yellowness-blueness (:math:`b_z`, :math:`b_{z,w}`).\n with domain_range_scale(\"ignore\"):\n I_z, a_z, b_z = tsplit(XYZ_to_Izazbz(XYZ_D65, method=\"Safdar 2021\"))\n I_z_w, _a_z_w, _b_z_w = tsplit(\n XYZ_to_Izazbz(XYZ_w, method=\"Safdar 2021\")\n )\n\n # Step 3 (Forward) - Computing hue angle :math:`h_z`\n h_z = hue_angle(a_z, b_z)\n\n # Step 4 (Forward) - Computing hue quadrature :math:`H`.\n H = hue_quadrature(h_z) if compute_H else np.full(h_z.shape, np.nan)\n\n # Computing eccentricity factor :math:`e_z`.\n e_z = 1.015 + np.cos(np.radians(89.038 + h_z % 360))\n\n # Step 5 (Forward) - Computing brightness :math:`Q_z`,\n # lightness :math:`J_z`, colourfulness :math`M_z`, and chroma :math:`C_z`\n Q_z_p = (1.6 * F_s) / (F_b**0.12)\n Q_z_m = F_s**2.2 * F_b**0.5 * spow(F_L, 0.2)\n Q_z = 2700 * spow(I_z, Q_z_p) * Q_z_m\n Q_z_w = 2700 * spow(I_z_w, Q_z_p) * Q_z_m\n\n J_z = 100 * Q_z / Q_z_w\n\n M_z = (\n 100\n * (a_z**2 + b_z**2) ** 0.37\n * (\n (spow(e_z, 0.068) * spow(F_L, 0.2))\n / (F_b**0.1 * spow(I_z_w, 0.78))\n )\n )\n\n C_z = 100 * M_z / Q_z_w\n\n # Step 6 (Forward) - Computing saturation :math:`S_z`,\n # vividness :math:`V_z`, blackness :math:`K_z`, and whiteness :math:`W_z`.\n with sdiv_mode():\n S_z = 100 * spow(F_L, 0.6) * np.sqrt(sdiv(M_z, Q_z))\n\n V_z = np.sqrt((J_z - 58) ** 2 + 3.4 * C_z**2)\n\n K_z = 100 - 0.8 * np.sqrt(J_z**2 + 8 * C_z**2)\n\n W_z = 100 - np.sqrt((100 - J_z) ** 2 + C_z**2)\n\n return CAM_Specification_ZCAM(\n as_float(from_range_1(J_z)),\n as_float(from_range_1(C_z)),\n as_float(from_range_degrees(h_z)),\n as_float(from_range_1(S_z)),\n as_float(from_range_1(Q_z)),\n as_float(from_range_1(M_z)),\n as_float(from_range_degrees(H, 400)),\n None,\n as_float(from_range_1(V_z)),\n as_float(from_range_1(K_z)),\n as_float(from_range_1(W_z)),\n )", "def enu2xyz(e, n, u, x, y, z):\n lat, lon, hgt = xyz2llh(x,y,z)\n sl = np.sin(lon)\n cl = np.cos(lon)\n sf = np.sin(lat)\n cf = np.cos(lat)\n R = np.matrix([[-sl, -cl*sf, cl*cf],\n [cl, -sl*sf, sl*cf],\n [0e0, cf, sf]])\n enu = np.matrix([[e],[n],[u]])\n return [item for sublist in (R * enu).tolist() for item in sublist]", "def l93_to_etrs(positions: np.ndarray) -> np.ndarray:\n if (positions.ndim != 2) or (positions.shape[1] != 3):\n raise ValueError(\n f\"`positions` should be a (n, 3) numpy array, got {positions.shape} instead.\"\n )\n t = Transformer.from_crs(\n crs_from=\"EPSG:2154\", # RGF93\n crs_to=\"EPSG:4896\" # ITRF2005 / ETRS used in MS\n )\n positions[:, 0], positions[:, 1], positions[:, 2] = 
t.transform(\n xx=positions[:, 0],\n yy=positions[:, 1],\n zz=positions[:, 2]\n )\n return positions", "def ellipsoid2d(ellipsoid, orbitinc):\n errtext = 'Invalid excentricity value in ellipsoid model.'\n inrange(ellipsoid[1], 0, 1, exclude='upper', text=errtext)\n\n inrange(orbitinc, 0, 180,\n exclude='both',\n text='Invalid orbit inclination.')\n\n rp = ellipsoid_r_geocentric(ellipsoid, orbitinc)\n\n return ellipsoid[0], np.sqrt(1 - (rp / ellipsoid[0])**2)", "def enu(self, o_xyz=None, o_llh=None):\n if o_llh is None: o_llh = o_xyz.llh()\n if o_xyz is None: o_xyz = o_llh.xyz(ellipsoid=self.ellipsoid)\n enu_to_xyz = enu_to_xyz_matrix(o_llh.lon, o_llh.lat)\n return ENU(*enu_to_xyz.T.dot(self()-o_xyz()),o_llh=o_llh,o_xyz=o_xyz)", "def geo_to_etrs(location: EarthLocation = nenufar_position) -> np.ndarray:\n gps_b = 6356752.31424518\n gps_a = 6378137\n e_squared = 6.69437999014e-3\n lat_rad = location.lat.rad\n lon_rad = location.lon.rad\n alt = location.height.value\n if location.isscalar:\n xyz = np.zeros((1, 3))\n else:\n xyz = np.zeros((location.size, 3))\n gps_n = gps_a / np.sqrt(1 - e_squared * np.sin(lat_rad) ** 2)\n xyz[:, 0] = (gps_n + alt) * np.cos(lat_rad) * np.cos(lon_rad)\n xyz[:, 1] = (gps_n + alt) * np.cos(lat_rad) * np.sin(lon_rad)\n xyz[:, 2] = (gps_b**2/gps_a**2*gps_n + alt) * np.sin(lat_rad)\n return xyz", "def get_f_elev(satellite):\n\n if tle_files is not None:\n filelist = glob(tle_files)\n tle_file = max(filelist, key=lambda x: os.stat(x).st_mtime)\n else:\n tle_file = None\n\n orb = Orbital(satellite.upper(), tle_file)\n\n def f_elev(utctime):\n \"\"\"Get the elevation for the given *utctime*.\n \"\"\"\n return orb.get_observer_look(utctime, *coords)[1]\n f_elev.satellite = satellite\n return f_elev", "def ecef2eci(R_ECEF,time): \n #\n # T is the Julian Date in julian centuries\n #\n d = time - 2451545.0;\n T = d/ 36525;\n #\n # Compute Greenwich Mean sidereal Time (in hours)\n #\n GMST = 2*np.pi*(0.7790572732640 + 1.00273781191125448*d)\n # \n # Compute Rotation Matrix\n #\n R_ECI = np.zeros((3,np.size(T)))\n #\n for i in range(0,np.size(T)):\n RT = np.array([[+np.cos(GMST[i]), -np.sin(GMST[i]), 0], \n [ +np.sin(GMST[i]),+np.cos(GMST[i]), 0], \n [0 , 0 , 1 ]]);\n #\n R_ECI[:,i] = np.matmul(RT,R_ECEF.T)\n #\n \n return R_ECI", "def ellipsoidal_projection(coordinates, inverse=False):\n ## Constants measured experimentally\n K11, K12, K13 = 111.13209, -0.56605, 0.00120\n K21, K22, K23 = 111.41513, -0.09455, 0.00012\n\n aux0 = coordinates[:, 0]\n aux1 = coordinates[:, 1]\n ## Projection\n aux0 = (K21*np.cos(aux1)+K22*np.cos(3*aux1)+K23*np.cos(5*aux1))*aux0\n aux1 = (K11+K12*np.cos(2*aux1)+K13*np.cos(4*aux1))*aux1\n aux0 = 180./np.pi*aux0\n aux1 = 180./np.pi*aux1\n\n coordinates[:, 0] = aux0\n coordinates[:, 1] = aux1\n\n return coordinates", "def enu(self, o_xyz=None, o_llh=None, ellipsoid=WGS84):\n if o_xyz is not None: ellipsoid = o_xyz.ellipsoid\n return self.xyz(ellipsoid).enu(o_xyz=o_xyz,o_llh=o_llh)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms the SEZ position (South-East-Zenith) into azimuth, elevation, and range.
def sez2AzElRange(r_sez):
    range = np.linalg.norm(r_sez)
    rx = r_sez[0]
    ry = r_sez[1]
    rz = r_sez[2]
    elevation = np.arcsin(rz/range)
    azimuth = np.arctan2(ry, -rx)
    if azimuth < 0:
        azimuth = azimuth + 2*np.pi
    return np.array([azimuth, elevation, range])
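A minimal usage sketch for the function above, assuming numpy is imported as np in the module where it lives; the sample SEZ vector and the degree conversion are illustrative assumptions, not part of the original. Note that the local variable named range shadows Python's builtin of the same name, which is harmless inside the function body.

import numpy as np

# Hypothetical topocentric SEZ position of a tracked object, in kilometres.
r_sez = np.array([-1200.0, 850.0, 3500.0])

az, el, rng = sez2AzElRange(r_sez)

print("azimuth   [deg]:", np.degrees(az))
print("elevation [deg]:", np.degrees(el))
print("range     [km] :", rng)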
[ "def convert_coords(self, stz):\n return np.array(\n [stz[0], np.mod(stz[1], 2.0 * np.pi), np.mod(stz[2], 2.0 * np.pi)],\n dtype=np.float64,\n )", "def seg_z_range(self,t_secs):\n vols=self.volumes(t_secs)\n areas=self.planform_areas().data # a top-down area for each segment.\n dzs=vols/areas\n\n # depth below water surface of the bottom of the segment prism\n self.infer_2d_elements()\n n_layer = self.n_seg // self.n_2d_elements\n dzs_2d=dzs.reshape((n_layer,-1))\n seg_zbot=np.cumsum(dzs_2d,axis=0).ravel()\n seg_ztop=seg_zbot-dzs\n return seg_ztop,seg_zbot", "def get_mc_shower_azimuth(self):\n return self.lib.get_mc_shower_azimuth()", "def xyz_from_hdz(h, d, z):\n d = math.radians(d)\n north = h * math.cos(d)\n east = h * math.sin(d)\n return north, east, z", "def get_zs(self, z):\n\n return self[0].get_zs(z)", "def upper_elevation(self):\n return self.pinlist[0].GetGroup().GetPlacementLayer().GetUpperElevation()", "def get_SN_HST_coord(SNID):\n # get position from SDSS information\n SDSS_SNPosition = get_SN_SDSS_coord(SNID)\n print('SDSS position: ', SDSS_SNPosition.to_string(style=u'hmsdms'))\n\n #skip 13038 because I don't have a shift\n if SNID == 13038:\n warnings.warn('SN13038 does not have a shift')\n SNPosition = SDSS_SNPosition\n else:\n # import shift data\n #todo(change this location to be dataset.csv)\n shift = Table.read('resources/shift.csv', format='ascii.commented_header')\n #todo(add units to table, from shift.meta)\n shift['delta RA'].unit, shift['delta Dec'].unit = u.arcsec, u.arcsec\n\n # apply shift with delta = HST - SDSS or HST = delta + SDSS\n # `[0]` are added to drop unneeded array wrapper. \n # It also makes an internal standard between hst-coords and sdss-cords\n deltaRA = Longitude(\n shift[shift['SDSS SN Name'] == SNID]['delta RA'].quantity[0]\n )\n deltaDec = Latitude(\n shift[shift['SDSS SN Name'] == SNID]['delta Dec'].quantity[0]\n )\n SNPosition = SkyCoord(ra = deltaRA + SDSS_SNPosition.ra,\n dec = deltaDec + SDSS_SNPosition.dec)\n print('deltas: ', deltaRA, deltaDec)\n print('SNPosition: ', SNPosition.to_string(style=u'hmsdms'))\n return SNPosition", "def upper_elevation(self):\n return self.pin.GetGroup().GetPlacementLayer().GetUpperElevation()", "def determineZeta(pmin,smax,R0=220.,Z0=1e4,L=200.):\n prange = np.linspace(.1,1.,100)\n v = np.array([onaxisMerit(pmin,smax,R0=R0,Z0=Z0,L=L,psi=p) for p in prange])\n try:\n return max(prange[v==0.]),0.\n except:\n return prange[np.argmin(v)],v.min()/1e3", "def azimuth(neurite):\n return morphmath.azimuth_from_vector(\n morphmath.vector(neurite.root_node.points[0], morph.soma.center)\n )", "def _SouthPosition(self,position):\n return (position[0]+1,position[1])", "def coordinates2Region():\n\tpass", "def get_azimuth(self):\n co = self.get_car()\n return np.arctan2(co[0], co[1])", "def fmse_zonal_deriv_eta(temp, z, q, q_ice, ps, bk, pk):\n deriv_obj = SphereEtaCenDeriv(fmse(temp, z, q, q_ice), pk, bk, ps)\n return deriv_obj.d_dx_const_p()", "def _az_alt( self, lat, lon, utc_offset, sun ):\n # Sun's Mean Longitude, L, gives us GMT at midnight.\n # GMST0 = (L + 180)/15\n GMST0 = ((sun.L+180)/15) % 24\n\n # Local Sidereal Time = GMST0 + UT + LON/15\n self.LST = GMST0 + utc_offset + lon/15\n\n # Hour Angle (in degrees) = 15*(LST - RA (in hours))\n self.HA = (15*(self.LST - self.RA/15)) % 360\n\n # celestial rectangular (x,y,z) coordinate\n x = math.cos(math.radians(self.HA)) * math.cos(math.radians(self.Decl))\n y = math.sin(math.radians(self.HA)) * math.cos(math.radians(self.Decl))\n z = 
math.sin(math.radians(self.Decl))\n\n # rotate this x,y,z system along the Y axis\n xhor = x*math.sin(math.radians(lat)) - z*math.cos(math.radians(lat))\n yhor = y\n zhor = x*math.cos(math.radians(lat)) + z*math.sin(math.radians(lat))\n\n self.azimuth = math.degrees( math.atan2( yhor, xhor ) ) + 180\n self.altitude = math.degrees( math.atan2( zhor, math.sqrt(xhor**2+yhor**2) ) )\n\n return self.azimuth, self.altitude", "def to_earth_location(self):\n # in astropy, x points north, y points east, so we need a minus for y.\n cart = CartesianRepresentation(self.x, -self.y, self.z)\n altaz = AltAz(cart, location=self.reference_location)\n return _altaz_to_earthlocation(altaz)", "def zRange(self):\n return self.z0, self.zMax", "def star_zenith_azimuth(model_time, lon, lat):\n\n ra, dec = right_ascension_declination(model_time)\n h_angle = local_hour_angle(model_time, lon, ra)\n\n zenith = np.arccos(np.sin(lat)*np.sin(dec) +\n np.cos(lat) * np.cos(dec) * np.cos(h_angle))\n\n azimuth = np.arctan2(-np.sin(h_angle), (np.cos(lat)*np.tan(dec) -\n np.sin(lat)*np.cos(h_angle)))\n\n return zenith, azimuth", "def object_az_el(source, site, year, doy):\n try:\n coords = APcn.get_icrs_coordinates(source)\n except APcn.NameResolveError as details:\n raise APcn.NameResolveError(details)\n module_logger.debug(\"Sky coords: %s\", coords)\n \n try:\n dss = C.DSS(site)\n module_logger.debug(\"DSS-%d: %f, %f\", site, dss.long*180/pi, dss.lat*180/pi)\n except KeyError:\n raise KeyError('%d is not a valid DSS station' % site)\n loc = APc.EarthLocation(dss.long*u.rad, dss.lat*u.rad)\n module_logger.debug(\"Site coords: %s\", loc)\n \n if doy:\n mjd = DT.MJD(year,doy)\n else:\n raise RuntimeError(\"no DOY given\")\n tt = APt.Time(mjd, format='mjd')\n module_logger.debug(\"ISO time = %s\", tt.iso)\n tt.delta_ut1_utc = 0\n coords.obstime = tt\n coords.location = loc\n return coords.altaz", "def z0(self):\n\t\treturn self.__xyz0[2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms from ECI to right ascension, declination, and range.
def eci2RightAscensionDeclinationRange(r_eci):
    x = r_eci[0]
    y = r_eci[1]
    z = r_eci[2]
    r_xy = np.sqrt(x**2+y**2)
    r = np.sqrt(x**2+y**2+z**2)
    rightAs = np.arctan2(y, x)
    dec = np.arctan2(z, r_xy)  # declination is between -90 and 90
    return np.array([rightAs, dec, r])
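A small usage example, again with a made-up ECI vector (the numbers are illustrative only): it converts the geocentric position to right ascension, declination, and range, then rebuilds the Cartesian vector from those sphericals as a round-trip consistency check.

import numpy as np

# Hypothetical geocentric ECI position, in kilometres.
r_eci = np.array([7000.0, -1200.0, 3000.0])

ra, dec, rng = eci2RightAscensionDeclinationRange(r_eci)

# Reconstruct the Cartesian vector from (ra, dec, range); it should match r_eci.
r_back = rng * np.array([np.cos(dec) * np.cos(ra),
                         np.cos(dec) * np.sin(ra),
                         np.sin(dec)])
print(np.allclose(r_back, r_eci))  # expected: True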
[ "def read_euler(self):\n data = self.bus.read_i2c_block_data(self.address, 0x1A, 6)\n return self.parse_axis(data, 16)", "def prove_range(amount, last_mask=None):\n C, a, R = tcry.gen_range_proof(amount, last_mask)\n\n # Trezor micropython extmod returns byte-serialized/flattened rsig\n nrsig = b\"\"\n for i in range(len(R.asig.s0)):\n nrsig += bytes(R.asig.s0[i])\n for i in range(len(R.asig.s1)):\n nrsig += bytes(R.asig.s1[i])\n nrsig += bytes(R.asig.ee)\n for i in range(len(R.Ci)):\n nrsig += bytes(R.Ci[i])\n return C, a, nrsig\n\n # # Rewrap to serializable structures\n # nrsig = xmrtypes.RangeSig()\n # nrsig.asig = xmrtypes.BoroSig()\n # nrsig.asig.ee = bytes(R.asig.ee)\n # nrsig.Ci = list(R.Ci)\n # nrsig.asig.s0 = list(R.asig.s0)\n # nrsig.asig.s1 = list(R.asig.s1)\n # del R\n #\n # for i in range(64):\n # nrsig.Ci[i] = bytes(nrsig.Ci[i])\n # nrsig.asig.s0[i] = bytes(nrsig.asig.s0[i])\n # nrsig.asig.s1[i] = bytes(nrsig.asig.s1[i])\n #\n # return C, a, nrsig", "def sd_to_aces_relative_exposure_values(\n sd,\n illuminant=SDS_ILLUMINANTS['D65'],\n apply_chromatic_adaptation=False,\n chromatic_adaptation_transform='CAT02'):\n\n shape = MSDS_ACES_RICD.shape\n if sd.shape != MSDS_ACES_RICD.shape:\n sd = sd.copy().align(shape)\n\n if illuminant.shape != MSDS_ACES_RICD.shape:\n illuminant = illuminant.copy().align(shape)\n\n s_v = sd.values\n i_v = illuminant.values\n\n r_bar, g_bar, b_bar = tsplit(MSDS_ACES_RICD.values)\n\n def k(x, y):\n \"\"\"\n Computes the :math:`K_r`, :math:`K_g` or :math:`K_b` scale factors.\n \"\"\"\n\n return 1 / np.sum(x * y)\n\n k_r = k(i_v, r_bar)\n k_g = k(i_v, g_bar)\n k_b = k(i_v, b_bar)\n\n E_r = k_r * np.sum(i_v * s_v * r_bar)\n E_g = k_g * np.sum(i_v * s_v * g_bar)\n E_b = k_b * np.sum(i_v * s_v * b_bar)\n\n E_rgb = np.array([E_r, E_g, E_b])\n\n # Accounting for flare.\n E_rgb += FLARE_PERCENTAGE\n E_rgb *= S_FLARE_FACTOR\n\n if apply_chromatic_adaptation:\n xy = XYZ_to_xy(sd_to_XYZ(illuminant) / 100)\n NPM = normalised_primary_matrix(RGB_COLOURSPACE_ACES2065_1.primaries,\n xy)\n XYZ = RGB_to_XYZ(E_rgb, xy, RGB_COLOURSPACE_ACES2065_1.whitepoint, NPM,\n chromatic_adaptation_transform)\n E_rgb = XYZ_to_RGB(XYZ, RGB_COLOURSPACE_ACES2065_1.whitepoint,\n RGB_COLOURSPACE_ACES2065_1.whitepoint,\n RGB_COLOURSPACE_ACES2065_1.matrix_XYZ_to_RGB)\n\n return from_range_1(E_rgb)", "def bias_to_energy_range(self, V):\n \n if V > 0:\n min_E = self.fermi_level\n max_E = self.fermi_level + V\n else:\n min_E = self.fermi_level + V\n max_E = self.fermi_level\n return min_E, max_E", "def map_to_range(orientation):\n return orientation - 360 * floor((orientation + 180) * (1 / 360))", "def _radec(self,*args,**kwargs):\n lbd= self._lbd(*args,**kwargs)\n return coords.lb_to_radec(lbd[:,0],lbd[:,1],degree=True)", "def process_ir_front(ir_left, ir_left_center, ir_right_center, ir_right):\n # Do something with the front IR sensors, values are in meters\n ir_left_distance = ir_left.range\n ir_right_center_distance = ir_left_center.range\n ir_right_center_distance = ir_right_center.range\n ir_right_distance = ir_right.range", "def add_ir_range(features: pd.DataFrame) -> pd.DataFrame:\r\n features['IR_range'] = features.IR_max - features.IR_min\r\n\r\n return features", "def get_euler_rotation_between(start, end):\n # Gets the rotation by converting Euler angles to rotation matrices and composing\n # return end.to_quaternion().rotation_difference( start.to_quaternion() ).to_euler()\n return (end.to_matrix() * start.to_matrix().inverted()).to_euler()", "def ecef2eci(R_ECEF,time): \n #\n 
# T is the Julian Date in julian centuries\n #\n d = time - 2451545.0;\n T = d/ 36525;\n #\n # Compute Greenwich Mean sidereal Time (in hours)\n #\n GMST = 2*np.pi*(0.7790572732640 + 1.00273781191125448*d)\n # \n # Compute Rotation Matrix\n #\n R_ECI = np.zeros((3,np.size(T)))\n #\n for i in range(0,np.size(T)):\n RT = np.array([[+np.cos(GMST[i]), -np.sin(GMST[i]), 0], \n [ +np.sin(GMST[i]),+np.cos(GMST[i]), 0], \n [0 , 0 , 1 ]]);\n #\n R_ECI[:,i] = np.matmul(RT,R_ECEF.T)\n #\n \n return R_ECI", "def SetCGII(self, cotaGeratriz):\r\n self.CGII = cotaGeratriz\r\n \r\n self.CGSI = (self.CGII + self.D) \r\n self.Desnivel = (self.CGII - self.CGIF)\r\n self.Iadotada = (self.Desnivel/self.L)\r\n self.alturaInicioTrecho = (self.CGII - self.PV1.CotaFundo)\r\n self.coberturaInicioTrecho = (self.PV1.CotaTerreno - self.CGSI - 2*self.EspessuraTubo)", "def E2V(E):\r\n# for energy in mev returns velocity in m/s\r\n return sqrt(E/5.227e-6)", "def CruiseCO2Emissions(self):\n return ( self.CruiseFuelBurn * self.Fuel.emissions_factor * \n self.Fuel.lower_heating_value / self.Aircraft['Cruise Speed'] / \n self.Aircraft['Max Seats'] / ureg['passenger']).to('kg/km/passenger')", "def dealias_range_elev_derivatives(radar): \n swp_start = np.ma.getdata(radar.sweep_start_ray_index['data'])\n swp_end = np.ma.getdata(radar.sweep_end_ray_index['data']) \n rrange = np.ma.getdata(radar.range['data']) # - 1D array [range]\n elevations = radar.elevation['data'] # - 1D array of elevations in deg \n el_fix = np.radians(elevations) \n # -- Dealias the velocity field\n dealias = pyart.correct.dealias_region_based(radar, vel_field = 'corrected_velocity')\n v_dealias = np.ma.getdata(dealias['data'])\n # -- dVRdr\n dVRdr = np.gradient(v_dealias, rrange, axis=1) # axis 1 = range axis\n # -- dVRde\n # -- I have to do like this because of the different sweeps\n dVRde = np.zeros_like(v_dealias)\n\n for i in np.arange(len(swp_start)):\n dVRde[swp_start[i],:] = (v_dealias[swp_start[i] + 1,:] - v_dealias[swp_start[i],:]) / (rrange * (el_fix[swp_start[i] + 1] - el_fix[swp_start[i]]))\n dVRde[swp_end[i],:] = (v_dealias[swp_end[i],:] - v_dealias[swp_end[i] - 1,:]) / (rrange * (el_fix[swp_end[i]] - el_fix[swp_end[i] - 1]))\n\n s = np.arange(len(el_fix))\n ss = np.delete(s,[swp_start,swp_end])\n for i in ss:\n dVRde[i,:] = (v_dealias[i + 1,:] - v_dealias[i - 1,:]) / (rrange * (el_fix[i + 1] - el_fix[i - 1]))\n\n # -- Adding fields to radar file\n dVRde = {'data' : dVRde}\n radar.add_field('dVRde', dVRde, replace_existing = True)\n\n dVRdr = {'data' : dVRdr}\n radar.add_field('dVRdr', dVRdr, replace_existing = True)\n\n radar.add_field('vel_dealias', dealias, replace_existing = True)\n\n return radar", "def ecef2eci(r_ecef, GMST):\n DCM = ROT3(-GMST) # Rotation matrix\n\n r_eci = DCM.dot(r_ecef)\n\n return r_eci", "def __get_range(self):\n return self.high - self.low", "def _evalue_RR(self, est, lo=None, hi=None, no_effect_baseline=1):\n if est < 0:\n raise ValueError(\"Risk/Rate Ratio cannot be negative\")\n if no_effect_baseline < 0:\n raise ValueError(\"no_effect_baseline value is impossible\")\n if no_effect_baseline != 1:\n self.logger.info(\n 'You are calculating a \"non-null\" E-value, i.e., an E-value for the minimum amount of unmeasured '\n \"confounding needed to move the estimate and confidence interval to your specified no_effect_baseline value \"\n \"rather than to the null value.\"\n )\n if lo is not None and hi is not None:\n if lo > hi:\n raise ValueError(\"Lower confidence limit should be less than upper confidence 
limit\")\n if lo is not None and est < lo:\n raise ValueError(\"Point estimate should be inside confidence interval\")\n if hi is not None and est > hi:\n raise ValueError(\"Point estimate should be inside confidence interval\")\n\n e_est = self._threshold(est, no_effect_baseline=no_effect_baseline)\n e_lo = self._threshold(lo, no_effect_baseline=no_effect_baseline)\n e_hi = self._threshold(hi, no_effect_baseline=no_effect_baseline)\n\n # if CI crosses null, set its E-value to 1\n null_CI = False\n if est > no_effect_baseline and lo is not None:\n null_CI = lo < no_effect_baseline\n if est < no_effect_baseline and hi is not None:\n null_CI = hi > no_effect_baseline\n if null_CI:\n e_lo = np.float64(1)\n e_hi = np.float64(1)\n\n # only report E-value for CI limit closer to null\n if lo is not None or hi is not None:\n if est > no_effect_baseline:\n e_hi = None\n else:\n e_lo = None\n\n return {\n \"converted_estimate\": est,\n \"converted_lower_ci\": lo,\n \"converted_upper_ci\": hi,\n \"evalue_estimate\": e_est,\n \"evalue_lower_ci\": e_lo,\n \"evalue_upper_ci\": e_hi,\n }", "def estimateEndoCoefs(node,times,G,transtype,ZERO,maxtime): \n assert transtype in [\"i2r\",\"e2i\"]\n statemap = {\"s\":Trace.SUSCEPTIBLE,\"e\":Trace.EXPOSED,\"i\":Trace.INFECTED,\"r\":Trace.RECOVERED}\n curstate,nextstate = [statemap[state] for state in transtype.split(\"2\")]\n if transtype == \"i2r\":\n transname = Trace.I2R \n elif transtype == \"e2i\":\n transname = Trace.E2I \n\n coef = 0.0\n if times.has_key(curstate) and times.has_key(nextstate):\n p1 = Dist.genPartDist(G.node[node][transname][0],G.node[node][transname][1],\"normal\")\n pdist = Dist.genPdf(p1) \n diftime = times[nextstate] - times[curstate]\n if pdist.has_key(diftime) and pdist[diftime] >= ZERO:\n coef = -1.0 * math.log(pdist[diftime])\n else:\n coef = -1.0 * math.log(ZERO) \n elif times.has_key(curstate):\n p1 = Dist.genPartDist(G.node[node][transname][0],G.node[node][transname][1],\"reverseCdf\")\n fdist = Dist.genPdf(p1) \n diftime = maxtime - times[curstate]\n if diftime == 0:\n coef = 0.0\n elif fdist.has_key(diftime) and fdist[diftime] >= ZERO:\n coef = -1.0 * math.log(fdist[diftime])\n else:\n coef = -1.0 * math.log(ZERO)\n return coef", "def range_process( instrument, raw, max_range, constants\n ,rs_cal, rs_Cxx, corr_adjusts ,processing_defaults):\n\n\n \n\n assert(rs_Cxx is not None)\n rs = hau.Time_Z_Group(like=raw)\n\n if 0:\n import matplotlib.pylab as plt\n \n mol = np.nanmean(raw.molecular_counts,0)\n wfov = np.nanmean(raw.molecular_wfov_counts,0)\n bin_vec = 7.5 * np.arange(len(wfov))\n mol = mol - np.nanmean(mol[0:40])\n wfov = wfov - np.nanmean(wfov[0:40])\n mol *= (bin_vec-45*7.5)**2\n wfov *= (bin_vec-45*7.5)**2\n wfov *= np.exp(-2*bin_vec *1e-5)\n #wfov = wfov - bin_vec*wfov[900]/(900 *0.0001)\n wfov *= mol[900]/wfov[900]\n plt.figure(99999)\n plt.plot(bin_vec,wfov,'c',bin_vec,mol,'r')\n ax=plt.gca()\n ax.set_yscale('log')\n plt.grid(True)\n plt.show()\n print j\n #copy corrected raw into rs \n for field in ['transmitted_1064_energy','transmitted_energy','seeded_shots','molecular_counts'\n ,'combined_lo_counts','combined_hi_counts','cross_pol_counts',\n 'combined_wfov_counts','molecular_wfov_counts',\n 'molecular_i2a_counts','combined_1064_counts','telescope_pointing']:\n if hasattr(raw,field):\n setattr(rs,field,getattr(raw,field).copy())\n setattr(rs,'raw_'+field,getattr(raw,field).copy())\n \n # compute bin number of laser pulse\n [dark_interval_end_time, laser_pulse_time, cal_pulse_end_time] = \\\n 
constants['apd_pulse_timing']\n bin_duration = constants['binwidth']\n s_bin = int(laser_pulse_time / bin_duration) # laser pulse bin number\n #dark_interval_end_bin = int(dark_interval_end_time / bin_duration)- 1\n\n nalts = raw.molecular_counts.shape[1]\n\n #save geo corrected raw counts as 'var_xxx' in rs so that they get averaged without\n #other range processing for use in compute_photon_statistics. We also multiply\n #by the square of the geocorrection to account for the geocorrection in the\n #signal used compute_phothon_statistics()\n if processing_defaults.enabled('compute_stats'):\n ones_array = np.ones(raw.molecular_counts.shape)\n # bin 0 of geo_correction is defined as occurring at the laser pulse\n geocorr = ones_array.copy()\n geocorr[:,s_bin:] = rs_cal.geo.data[:nalts-s_bin, 1] * ones_array[:,s_bin:]\n \n for field in ('molecular_counts','combined_lo_counts'\n ,'combined_hi_counts','cross_pol_counts','combined_wfov_counts'\n ,'molecular_wfov_counts','molecular_i2a_counts','combined_1064_counts'):\n if hasattr(raw,field):\n setattr(rs,'var_raw_'+field,getattr(raw,field)*geocorr*geocorr) \n \n #counts arrays are the average number of counts in a data raw acquistion interval\n #of raw.times[2]-raw.times[1] while seeded_shots is the total number of laser pulses\n #the acquisition interval prior to preaveraging in preprocess_raw.py\n\n #note: this does not compensate for the pileup correction--in very high count areas this\n #will under estimate the varience because actual counts are multipled by a pileup correction\n #in the preprocess_raw.py routine \n\n #counts have been pileup corrected in preprocessing\n #do dark correction for all channels\n \n s_time =datetime.utcnow()\n dark_count_correction(instrument,raw,rs,rs_Cxx,corr_adjusts,processing_defaults,constants)\n print 'time for dark correction = ',datetime.utcnow() - s_time\n\n # gain correction for nadir pointing in airborne operation\n # this is expected to be a very small correction with little\n # impact on signal statitics\n if 'installation' in constants and constants['installation'] == 'airborne' \\\n and constants['nadir_comb_gain_adjustment'] != 1.0:\n print 'Apply nadir gain adjustment'\n print 'nadir gain adj= ', constants['nadir_comb_gain_adjustment']\n ix = np.arange(rs.telescope_pointing.shape[0])\n indices = ix[rs.telescope_pointing[:] < 0.1]\n nadir_gain_adj = constants['nadir_comb_gain_adjustment']\n rs.combined_lo_counts[indices, :] *= nadir_gain_adj\n rs.combined_hi_counts[indices, :] *= nadir_gain_adj\n \n #np.set_printoptions(threshold='nan')\n \n #do baseline correction\n rs = baseline_correction(rs,rs_cal,nalts,corr_adjusts,constants)\n \n # correct for differential geometry between 1064 and 532 nm channels\n rs = diff_1064_532_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(67)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n #do combined-molecular differential geo correction if available\n rs = diff_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(68)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n \n # Matt 
Add: do cross polarization differential geometry correction\n rs = diff_cp_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n \n # do i2a differential geo correction if present and relavent to instrument\n if hasattr(rs,'molecular_i2a_counts') and corr_adjusts['i2a_dif_geo_corr'] > 0:\n rs = i2a_diff_geo_correction(rs,rs_cal,corr_adjusts)\n\n #create combined_counts from combined_hi and combined_lo profiles\n rs = merge_combined_hi_and_lo(rs,constants)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(69)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b'\n ,np.nanmean(rs.combined_lo_counts,0),np.arange(len(rs.combined_lo_counts[0,:])),'c'\n ,np.nanmean(rs.cross_pol_counts,0),np.arange(len(rs.cross_pol_counts[0,:])),'g'\n ,np.nanmean(rs.combined_counts,0),np.arange(len(rs.combined_counts[0,:])),'k')\n ax=plt.gca()\n ax.set_xscale('log')\n #plt.show()\n\n print 'cp/mol'\n \"\"\"\n if processing_defaults.enabled('wfov_geo_corr') and hasattr(rs,'molecular_wfov_counts'):\n #do geometry correction after adjusting geo_corr with wide-field-of-view data.\n geo_corr = rs_cal.geo.data[:4000,1]\n s_bin = np.int(constants['apd_pulse_timing'][1]/constants['binwidth'])\n wfov_ratios = np.zeros(rs.molecular_wfov_counts.shape[1])\n wfov_ratios[:-s_bin] = nanmean(rs.molecular_wfov_counts[:,s_bin:],0)\\\n / nanmean(rs.molecular_counts[:,s_bin:],0) \n wfov_geometry_correction(rs,wfov_ratios,geo_corr,processing_defaults,constants,corr_adjusts)\n \"\"\"\n #does wfov corr exist?\n if processing_defaults.enabled('wfov_corr') and hasattr(rs,'molecular_wfov_counts')\\\n and hasattr(rs_cal,'geo')\\\n and hasattr(rs_cal.geo,'wfov_mol_ratio'):\n \n \n #add pre-trigger bins to wfov_mol_ratio array provided in geofile_default_file\n #and add to structure for use in extinction processing\n calibration_wfov_mol_ratio = np.zeros(rs.molecular_counts.shape[1])\n calibration_wfov_mol_ratio[s_bin:] = \\\n rs_cal.geo.wfov_mol_ratio[:(rs.molecular_counts.shape[1]-s_bin)]\n rs.calibration_wfov_mol_ratio = hau.Z_Array(calibration_wfov_mol_ratio)\n \n # do the normal geometric correction on the following variables\n select = ['molecular_counts','combined_lo_counts','combined_hi_counts'\n ,'molecular_i2a_counts','combined_1064_counts','molecular_wfov_counts'\n ,'combined_counts','cross_pol_counts']\n rs = lu.geometry_correction(select,rs,rs_cal,nalts,s_bin,corr_adjusts['geo_corr'])\n \n #mask close range bin counts\n first_bin_to_process = processing_defaults.get_value('first_bin_to_process','bin_number')\n for field in ['combined_hi_counts','combined_lo_counts','combined_wfov_counts','molecular_wfov_counts'\n 'molecular_i2a_counts','molecular_counts','cross_pol_counts','combined_counts'\\\n 'combined_1064_counts']:\n if hasattr(rs,field):\n getattr(rs,field)[:, :(s_bin+first_bin_to_process)] = np.NaN\n \n return rs", "def to_euler(vector0, vector1, aim_axis=0, up_axis=1, axes=XYZ, extrapolate=False): \n \n vector0 = _setDimension(vector0,2)\n vector1 = _setDimension(vector1,2)\n aim_axis = _setDimension(aim_axis,1,dtype=np.int32) % 3\n up_axis = _setDimension(up_axis,1,dtype=np.int32) % 3\n axes = _setDimension(axes,1,dtype=np.int32)\n \n vector0, vector1, aim_axis, up_axis, axes = _matchDepth(vector0, vector1, aim_axis, up_axis, axes)\n \n return _matrixToEuler(_vectorToMatrix(vector0, vector1, aim_axis, up_axis), axes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the time derivatives of a unit vector given by a/a_norm.
def computeUnitVectorDerivatives(a, a_dot, a_ddot=None):
    a_inner = np.inner(a, a)
    a_norm = np.sqrt(a_inner)
    a_outer = np.outer(a, a)
    a_dot_inner = np.inner(a_dot, a_dot)
    a_dot_outer = np.outer(a_dot, a_dot)
    r = a/a_norm
    r_dot = a_dot/a_norm - a_outer.dot(a_dot)/a_norm**3
    if a_ddot is not None:
        # d^2/dt^2 (a/|a|); the last term carries a factor of 3 from
        # differentiating the 1/|a|^3 factor in the first-derivative term.
        r_ddot = a_ddot/a_norm \
            - (2*a_dot_outer.dot(a) + a_dot_inner*a + a_outer.dot(a_ddot))/a_norm**3 \
            + 3*a_outer.dot(a_dot_outer).dot(a)/a_norm**5
    else:
        r_ddot = None
    return (r, r_dot, r_ddot)
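One quick way to check the formulas above is to compare them against central finite differences of the unit vector along an arbitrary smooth trajectory. The test trajectory a(t) and the step size below are assumptions chosen purely for illustration; any smooth, non-vanishing a(t) would do.

import numpy as np

# Hypothetical smooth test trajectory with known analytic derivatives.
a_of_t      = lambda t: np.array([np.cos(t), np.sin(2.0 * t), 1.0 + t])
a_dot_of_t  = lambda t: np.array([-np.sin(t), 2.0 * np.cos(2.0 * t), 1.0])
a_ddot_of_t = lambda t: np.array([-np.cos(t), -4.0 * np.sin(2.0 * t), 0.0])

u = lambda t: a_of_t(t) / np.linalg.norm(a_of_t(t))  # the unit vector itself

t0, h = 0.7, 1e-4
r, r_dot, r_ddot = computeUnitVectorDerivatives(a_of_t(t0), a_dot_of_t(t0), a_ddot_of_t(t0))

r_dot_fd  = (u(t0 + h) - u(t0 - h)) / (2.0 * h)
r_ddot_fd = (u(t0 + h) - 2.0 * u(t0) + u(t0 - h)) / h**2

# Both residuals should be tiny; they shrink as h**2 until rounding dominates.
print(np.max(np.abs(r_dot - r_dot_fd)))
print(np.max(np.abs(r_ddot - r_ddot_fd)))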
[ "def _derivadot(self, a):\n #verified correct by putting 5 different a's into mathematica and comparing.\n numerator = - (self._Om) + 2 * (1 - self._Om) * (a ** 3)\n denominator = 2 * np.sqrt((a ** 3) * (self._Om) + (1 - self._Om) * (a ** 6))\n return numerator/denominator", "def DtDt(u, dt):\n return (u(t).subs(t, t+dt) - 2*u(t) + u(t).subs(t, t-dt))/dt**2", "def state_vector_derivative(x0, t0, mu):\n x, xdot = x0[:3], x0[3:]\n xdotdot = -mu / (np.linalg.norm(x)) ** 3 * x\n return np.r_[xdot, xdotdot]", "def Df(x, t):\n#######\n if type(x) <> 'numpy.ndarray': x = numpy.array(x) # convert to numpy array\n N = x.shape[0] # length of the original array \n df = [] # initial derivative empyy list\n for k in range(N): # loop for calculation \n if k == 0: # first point case\n dx = x[k + 1] - x[k]\n dt = t[k + 1] - t[k]\n elif k == N - 1: # last point case\n dx = x[k] - x[k - 1]\n dt = t[k] - t[k - 1]\n else: # remaining cases\n dx = x[k + 1] - x[k - 1]\n dt = t[k + 1] - t[k - 1] \n df.append(dx/dt) # add point to the list\n return numpy.array(df)", "def _time_derivative(self,xi):\n return np.dot(sym.Jacobian(xi,self.q_o),self.v_o)+np.dot(sym.Jacobian(xi,self.q_m),self.u_m)", "def derivative(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass", "def derivative(data, dt):\n\tdata = np.insert(data, 0, data[0])\n\tdata = np.diff(data/dt)\n\treturn data", "def _delta(alpha):\n return alpha / np.sqrt(1 + alpha**2)", "def func_diff_signal_vector_mag(t, a):\n mask = ~(np.isnan(t) | np.isnan(a))\n t = t[mask]\n a = a[mask]\n if len(a) > 2:\n k = 2\n del_t = np.diff(t)[k-1:]\n a_dk = np.abs(a[k:] - a[k-1:-1])\n a_dk_minus1 = np.abs(a[k-1:-1] - a[k-2:-2])\n avg_a_dk = (a_dk_minus1 + a_dk)/2.0\n T = np.nanmax(t) - np.nanmin(t)\n dsvm = np.nansum(avg_a_dk * del_t) / T\n else:\n dsvm = np.nan\n return dsvm", "def dl(self, part=False):\n\t\tif part:\n\t\t\treturn self.dtheta()*np.sqrt(self.eval_ds()**2 + self.eval_s()**2)\n\t\telse:\n\t\t\treturn self.dtheta()*np.tile(np.sqrt(self.eval_ds()**2 + self.eval_s()**2), self.n_antennas)", "def test_differentiate_vec():\n t = np.linspace(0, 4, 9)\n u = 2*np.sin(t) + 7\n dudt_expected = differentiate(u, dt=t[1]-t[0])\n dudt_computed = differentiate_vec(u, dt=t[1]-t[0])\n diff = abs(dudt_expected - dudt_computed).max()\n tol = 1E-15\n assert diff < tol", "def lsq_deriv(a):\n\n ### Could this be sped up by doing multiplication in Fourier\n ### domain?\n\n # array size error fixed by WBD 20071207\n\n a_x = -scipy.ndimage.convolve(a,X_KERNEL,mode='constant')\n a_y = scipy.ndimage.convolve(a,Y_KERNEL,mode='constant')\n return a_x, a_y", "def derivatives(n_particle, position, velocity, epsilon, mass_a, soften):\r\n v_dot, velocity, m = gravity2(n_particle, position[:, 0:3], epsilon, mass_a, soften, velocity)\r\n print('vel then out')\r\n print(np.shape(velocity))\r\n print(np.shape(v_dot))\r\n print(np.shape(m))\r\n out = np.array([m, velocity[:, 0], velocity[:, 1], velocity[:, 2], v_dot[:, 0], v_dot[:, 1], v_dot[:, 2]])\r\n return np.transpose(out)", "def dshape_deta(xi, eta):\n dN_deta = np.array([-(0.25 - 0.25*xi)*(1 - eta) + (0.25*xi - 0.25)*(-eta - xi - 1),\n -(1 - eta) * (0.25 * xi + 0.25) + (-0.25 * xi - 0.25) * (-eta + xi - 1),\n (eta + 1) * (0.25 * xi + 0.25) + (0.25 * xi + 0.25) * (eta + xi - 1),\n (0.25 - 0.25 * xi) * (eta + 1) + (0.25 - 0.25 * xi) * (eta - xi - 1),\n -0.5 * (1 - xi) * (xi + 1),\n (1 - eta) * (0.5 * xi + 0.5) + (-eta - 1) * (0.5 * xi + 0.5),\n 0.5 * (1 - xi) * (xi + 1),\n (0.5 - 0.5 * xi) * (1 - eta) + (0.5 - 0.5 * xi) * 
(-eta - 1), ]) # vector\n return dN_deta", "def _ode_dVdt(self, V, t, u_t0, u_t1, sigma):\n alpha = (self.dt - t) / self.dt\n beta = t / self.dt\n x = V[self.x_ind]\n u = u_t0 + (t / self.dt) * (u_t1 - u_t0)\n\n # using \\Phi_A(\\tau_{k+1},\\xi) = \\Phi_A(\\tau_{k+1},\\tau_k)\\Phi_A(\\xi,\\tau_k)^{-1}\n # and pre-multiplying with \\Phi_A(\\tau_{k+1},\\tau_k) after integration\n Phi_A_xi = np.linalg.inv(V[self.A_bar_ind].reshape((self.n_x, self.n_x)))\n\n A_subs = sigma * self.A(x, u)\n B_subs = sigma * self.B(x, u)\n f_subs = self.f(x, u)\n\n dVdt = np.zeros_like(V)\n dVdt[self.x_ind] = sigma * f_subs.transpose()\n dVdt[self.A_bar_ind] = np.matmul(A_subs, V[self.A_bar_ind].reshape((self.n_x, self.n_x))).reshape(-1)\n dVdt[self.B_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * alpha\n dVdt[self.C_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * beta\n dVdt[self.S_bar_ind] = np.matmul(Phi_A_xi, f_subs).transpose()\n z_t = -np.matmul(A_subs, x) - np.matmul(B_subs, u)\n dVdt[self.z_bar_ind] = np.matmul(Phi_A_xi, z_t)\n\n return dVdt", "def f(self, x , u , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ###################\n \n slip = u\n v = x[1]\n \n # compute ratio of horizontal/vertical force\n mu = self.slip2force( slip ) \n \n # constant params local vairables\n ry, rr, rf = self.compute_ratios() \n m = self.mass \n g = self.gravity\n rcda = self.rho * self.cdA\n \n # Drag froce\n fd = 0.5 * rcda * v * np.abs( v ) # drag froce with the right sign\n \n # Acceleration (equation considering weight transfer)\n a = (mu * m * g * rr - fd )/( m * (1 + mu * ry ))\n \n ###################\n \n dx[0] = v # velocity\n dx[1] = a # acc\n \n ###################\n # Normal force check\n fn_front = m * g * rr - m * a * ry\n fn_rear = m * g * rf + m * a * ry\n if (fn_front<0) :\n print('Normal force on front wheel is negative: fn = ', fn_front)\n if (fn_rear<0) : \n print('Normal force on rear wheel is negative: fn = ', fn_rear)\n ###################\n \n return dx", "def f(self, x , u , t = 0 ):\n \n # from state vector (x) to angle and speeds (q,dq)\n [ q , dq ] = self.x2q( x ) \n \n # compute joint acceleration \n ddq = self.ddq( q , dq , u , t ) \n \n # from angle and speeds diff (dq,ddq) to state vector diff (dx)\n dx = self.q2x( dq , ddq ) \n \n return dx", "def numeric_force(atoms, a, i, d=0.001):\n p0 = atoms.get_positions()\n p = p0.copy()\n p[a, i] += d\n atoms.set_positions(p, apply_constraint=False)\n eplus = atoms.get_potential_energy()\n p[a, i] -= 2 * d\n atoms.set_positions(p, apply_constraint=False)\n eminus = atoms.get_potential_energy()\n atoms.set_positions(p0, apply_constraint=False)\n return (eminus - eplus) / (2 * d)", "def calculate_delta_t(initial, final, speed):\n delta_x = final[0] - initial[0]\n delta_y = final[1] - initial[1]\n distance = math.sqrt(delta_x * delta_x + delta_y * delta_y)\n return distance / speed" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic rotation about the 1st axis through an Euler angle alpha
def ROT1(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[1, 0, 0],
                    [0, cos_al, sin_al],
                    [0, -sin_al, cos_al]])
    return DCM
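A short sanity check, with nothing below taken from the original source: any direction cosine matrix returned by ROT1 should be orthogonal with determinant +1, and rotating by alpha and then by -alpha should give the identity. The sign pattern above matches the frame-rotation (passive) convention common in astrodynamics texts, though the source does not state its convention explicitly.

import numpy as np

alpha = np.radians(37.0)  # arbitrary test angle
R = ROT1(alpha)

print(np.allclose(R @ R.T, np.eye(3)))           # orthogonality: expected True
print(np.isclose(np.linalg.det(R), 1.0))         # proper rotation: expected True
print(np.allclose(ROT1(-alpha) @ R, np.eye(3)))  # inverse is rotation by -alpha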
[ "def rotate(self,alpha):\n\n alpha=alpha*(np.pi/180.0)\n return Point(self.x*np.cos(alpha)-self.y*np.sin(alpha),self.y*np.cos(alpha)+self.x*np.sin(alpha))", "def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length\r\n # 1 or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n if len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.z):\r\n self._logger.warn('Wrong number of \"angles\" - I need {0}'.format(len(self.z)))\r\n # self.rotation_angle = 0.\r\n return\r\n\r\n z_rot = copy.copy(self.z)\r\n z_err_rot = copy.copy(self.z_err)\r\n\r\n for idx_freq in range(len(self.z)):\r\n\r\n angle = lo_angles[idx_freq]\r\n if np.isnan(angle):\r\n angle = 0.\r\n\r\n if self.z_err is not None:\r\n z_rot[idx_freq], z_err_rot[idx_freq] = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle,\r\n self.z_err[idx_freq, :, :])\r\n else:\r\n z_rot[idx_freq], z_err_rot = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle)\r\n\r\n self.z = z_rot\r\n if self.z_err is not None:\r\n self.z_err = z_err_rot\r\n\r\n # for consistency recalculate resistivity and phase\r\n self.compute_resistivity_phase()", "def rotate(self, alpha):\r\n\r\n if self.tipper is None:\r\n self._logger.error('tipper array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length 1\r\n # or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n elif len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.tipper):\r\n self._logger.error('Wrong number Number of \"angles\" - need %ii ' % (len(self.tipper)))\r\n self.rotation_angle = 0.\r\n return\r\n\r\n tipper_rot = copy.copy(self.tipper)\r\n tipper_err_rot = copy.copy(self.tipper_err)\r\n\r\n for idx_freq in range(len(tipper_rot)):\r\n angle = 
lo_angles[idx_freq]\r\n\r\n if self.tipper_err is not None:\r\n tipper_rot[idx_freq], tipper_err_rot[idx_freq] = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle,\r\n self.tipper_err[idx_freq, :, :])\r\n else:\r\n tipper_rot[idx_freq], tipper_err_rot = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle)\r\n\r\n self.tipper = tipper_rot\r\n self.tipper_err = tipper_err_rot\r\n\r\n # for consistency recalculate mag and angle\r\n self.compute_mag_direction()\r\n\r\n # for consistency recalculate amplitude and phase\r\n self.compute_amp_phase()", "def adjust_heading_degrees(alpha):\n return mod(alpha + 180, 360) - 180", "def Rot1(t):\n return array([[1.,0.,0.], [0.,cos(t),-sin(t)], [0.,sin(t),cos(t)]]);", "def rotation(self,t):\n pass", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez)\n\n #these approximations are for compton scattering\n if (abs(beta) < 0.027):\n alpha = 0.0\n else:\n arg = ey/math.sin(beta)\n aarg = abs(arg)\n if (aarg < 1.0):\n alpha = math.asin(arg)\n else:\n arg = arg/(1.0001*aarg)\n sco1 = math.cos(alpha)*math.sin(beta) + ex\n sco1 = abs(sco1)\n sco2 = abs(ex)\n if (sco1 < sco2):\n beta = -beta\n alpha = -alpha\n gamma = 0.0\n # alpha, beta, gamma are the euler angles of rotation from the z-axis\n # to the direction of the initial particle.\n theta = angl\n rn1 = np.random.rand()\n phi = 2*math.pi*rn1\n\n # now calculate the roation matrix to rotate the scattered direction\n # back to the original axes.\n r11 = math.cos(alpha)*math.cos(beta)*math.cos(gamma) - math.sin(alpha)*math.sin(gamma)\n r12 = math.cos(beta)*math.sin(alpha)*math.cos(gamma) + math.cos(alpha)*math.sin(gamma)\n r13 = -math.sin(beta)*math.cos(gamma)\n r21 = -math.sin(gamma)*math.cos(beta)*math.cos(alpha) - math.sin(alpha)*math.cos(gamma)\n r22 = -math.sin(gamma)*math.cos(beta)*math.sin(alpha) + math.cos(alpha)*math.cos(gamma)\n r23 = math.sin(beta)*math.sin(gamma)\n r31 = math.sin(beta)*math.cos(alpha)\n r32 = math.sin(alpha)*math.sin(beta)\n r33 = math.cos(beta)\n sox = math.sin(theta)*math.cos(phi)\n soy = math.sin(theta)*math.sin(phi)\n soz = math.cos(theta)\n sx = r11*sox + r21*soy + r31*soz\n sy = r12*sox + r22*soy + r32*soz\n sz = r13*sox + r23*soy + r33*soz\n # sx, sy, sz is the unit propagation vector of the scattered particle\n # in the original fram.\n return sx, sy, sz", "def rotate(self, a=None):\r\n if a == None: return self * (-1)\r\n x = (self.x * math.cos(a)) - (self.y * math.sin(a))\r\n y = (self.x * math.sin(a)) + (self.y * math.cos(a))\r\n \r\n return Vector3(x,y,self.z)", "def PlotRotation(self) -> _n_0_t_5:", "def RotX90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][0] = 1.0\n rot[1][2] = 1.0\n rot[2][1] = -1.0\n return rot", "def rotor(theta):\n #return complex(np.cos(theta), -np.sin(theta))\n return np.cos(theta)*complex(1,0) - np.sin(theta)*complex(0,1)", "def RotY90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][2] = -1.0\n rot[1][1] = 1.0\n rot[2][0] = 1.0\n return rot", "def rotate(self, alphaDegree, cx, cy):\n alphaRadian = math.radians(alphaDegree)\n rotateMatrix = (\n math.cos(alphaRadian), math.sin(alphaRadian),\n -math.sin(alphaRadian), math.cos(alphaRadian),\n 0, 0\n )\n if cx == 0 and cy == 0:\n return self.multiply(rotateMatrix)\n\n newMatrix = self.multiply((1, 0, 0, 1, cx, cy)) # compensate for center\n newMatrix = newMatrix.multiply(rotateMatrix)\n\n return newMatrix.multiply((1, 0, 0, 1, -cx, -cy)) # compensate 
back for center", "def rot_axis():\n import numpy as np\n pi = np.pi\n delta = random.uniform(-1.,1.)\n delta = np.arccos(delta) # arc\n phi = random.uniform(-pi, pi) # rotation\n return delta, phi #radians...", "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def rot90(v0, v1):\n return -v1, v0", "def rotationFromAngleDirection(*args):\n return _almathswig.rotationFromAngleDirection(*args)", "def rotAngle(self) -> float:\n\n return self.a", "def axisRotationProjection(*args):\n return _almathswig.axisRotationProjection(*args)", "def rotationalAcceleration(self, t):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic rotation about the 2nd axis through an Euler angle alpha
def ROT2(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[cos_al, 0, -sin_al],
                    [0, 1, 0],
                    [sin_al, 0, cos_al]])
    return DCM
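As a small illustrative check (the 90-degree case is chosen only because the result is easy to verify by hand): with the sign convention used above, a 90-degree ROT2 maps the x axis onto the z axis and leaves the second (y) component of any vector untouched.

import numpy as np

R = ROT2(np.pi / 2.0)

print(np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 0.0, 1.0]))  # expected: True
print(np.allclose(R @ np.array([0.0, 1.0, 0.0]), [0.0, 1.0, 0.0]))  # y untouched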
[ "def rotate(self,alpha):\n\n alpha=alpha*(np.pi/180.0)\n return Point(self.x*np.cos(alpha)-self.y*np.sin(alpha),self.y*np.cos(alpha)+self.x*np.sin(alpha))", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez)\n\n #these approximations are for compton scattering\n if (abs(beta) < 0.027):\n alpha = 0.0\n else:\n arg = ey/math.sin(beta)\n aarg = abs(arg)\n if (aarg < 1.0):\n alpha = math.asin(arg)\n else:\n arg = arg/(1.0001*aarg)\n sco1 = math.cos(alpha)*math.sin(beta) + ex\n sco1 = abs(sco1)\n sco2 = abs(ex)\n if (sco1 < sco2):\n beta = -beta\n alpha = -alpha\n gamma = 0.0\n # alpha, beta, gamma are the euler angles of rotation from the z-axis\n # to the direction of the initial particle.\n theta = angl\n rn1 = np.random.rand()\n phi = 2*math.pi*rn1\n\n # now calculate the roation matrix to rotate the scattered direction\n # back to the original axes.\n r11 = math.cos(alpha)*math.cos(beta)*math.cos(gamma) - math.sin(alpha)*math.sin(gamma)\n r12 = math.cos(beta)*math.sin(alpha)*math.cos(gamma) + math.cos(alpha)*math.sin(gamma)\n r13 = -math.sin(beta)*math.cos(gamma)\n r21 = -math.sin(gamma)*math.cos(beta)*math.cos(alpha) - math.sin(alpha)*math.cos(gamma)\n r22 = -math.sin(gamma)*math.cos(beta)*math.sin(alpha) + math.cos(alpha)*math.cos(gamma)\n r23 = math.sin(beta)*math.sin(gamma)\n r31 = math.sin(beta)*math.cos(alpha)\n r32 = math.sin(alpha)*math.sin(beta)\n r33 = math.cos(beta)\n sox = math.sin(theta)*math.cos(phi)\n soy = math.sin(theta)*math.sin(phi)\n soz = math.cos(theta)\n sx = r11*sox + r21*soy + r31*soz\n sy = r12*sox + r22*soy + r32*soz\n sz = r13*sox + r23*soy + r33*soz\n # sx, sy, sz is the unit propagation vector of the scattered particle\n # in the original fram.\n return sx, sy, sz", "def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length\r\n # 1 or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n if len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.z):\r\n self._logger.warn('Wrong number of \"angles\" - I need {0}'.format(len(self.z)))\r\n # self.rotation_angle = 0.\r\n return\r\n\r\n z_rot = copy.copy(self.z)\r\n z_err_rot = copy.copy(self.z_err)\r\n\r\n for idx_freq in range(len(self.z)):\r\n\r\n angle = lo_angles[idx_freq]\r\n if np.isnan(angle):\r\n angle = 0.\r\n\r\n if self.z_err is not None:\r\n z_rot[idx_freq], z_err_rot[idx_freq] = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle,\r\n self.z_err[idx_freq, :, :])\r\n else:\r\n z_rot[idx_freq], z_err_rot = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n 
angle)\r\n\r\n self.z = z_rot\r\n if self.z_err is not None:\r\n self.z_err = z_err_rot\r\n\r\n # for consistency recalculate resistivity and phase\r\n self.compute_resistivity_phase()", "def rotate(self, alpha):\r\n\r\n if self.tipper is None:\r\n self._logger.error('tipper array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length 1\r\n # or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n elif len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.tipper):\r\n self._logger.error('Wrong number Number of \"angles\" - need %ii ' % (len(self.tipper)))\r\n self.rotation_angle = 0.\r\n return\r\n\r\n tipper_rot = copy.copy(self.tipper)\r\n tipper_err_rot = copy.copy(self.tipper_err)\r\n\r\n for idx_freq in range(len(tipper_rot)):\r\n angle = lo_angles[idx_freq]\r\n\r\n if self.tipper_err is not None:\r\n tipper_rot[idx_freq], tipper_err_rot[idx_freq] = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle,\r\n self.tipper_err[idx_freq, :, :])\r\n else:\r\n tipper_rot[idx_freq], tipper_err_rot = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle)\r\n\r\n self.tipper = tipper_rot\r\n self.tipper_err = tipper_err_rot\r\n\r\n # for consistency recalculate mag and angle\r\n self.compute_mag_direction()\r\n\r\n # for consistency recalculate amplitude and phase\r\n self.compute_amp_phase()", "def rotation(self,t):\n pass", "def rot_axis():\n import numpy as np\n pi = np.pi\n delta = random.uniform(-1.,1.)\n delta = np.arccos(delta) # arc\n phi = random.uniform(-pi, pi) # rotation\n return delta, phi #radians...", "def adjust_heading_degrees(alpha):\n return mod(alpha + 180, 360) - 180", "def rotor(theta):\n #return complex(np.cos(theta), -np.sin(theta))\n return np.cos(theta)*complex(1,0) - np.sin(theta)*complex(0,1)", "def rot90(v0, v1):\n return -v1, v0", "def RotY90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][2] = -1.0\n rot[1][1] = 1.0\n rot[2][0] = 1.0\n return rot", "def Rot2(t):\n return array([[cos(t),0.,sin(t)], [0.,1.,0.], [-sin(t),0.,cos(t)]]);", "def rotationFromAngleDirection(*args):\n return _almathswig.rotationFromAngleDirection(*args)", "def PlotRotation(self) -> _n_0_t_5:", "def rotate(self, a=None):\r\n if a == None: return self * (-1)\r\n x = (self.x * math.cos(a)) - (self.y * math.sin(a))\r\n y = (self.x * math.sin(a)) + (self.y * math.cos(a))\r\n \r\n return Vector3(x,y,self.z)", "def Rot1(t):\n return array([[1.,0.,0.], [0.,cos(t),-sin(t)], [0.,sin(t),cos(t)]]);", "def asym_rotate(v):\n # Keep red the same\n rotate_green = 224 // 3\n v[:,:,:,1] = np.roll(v[:,:,:,1], rotate_green, axis = 1)\n \n rotate_blue = (2 * 224) // 3\n v[:,:,:,2] = 
np.roll(v[:,:,:,2], rotate_blue, axis = 1)\n \n return v", "def RotX90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][0] = 1.0\n rot[1][2] = 1.0\n rot[2][1] = -1.0\n return rot", "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def axisRotationProjection(*args):\n return _almathswig.axisRotationProjection(*args)", "def rotz(delta):\n deltaRad = m.pi*delta/180;\n return np.array([[m.cos(deltaRad),-m.sin(deltaRad),0.],[m.sin(deltaRad),m.cos(deltaRad),0.],[0.,0.,1.]]);" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic rotation about the 3rd axis through an Euler angle alpha
def ROT3(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[cos_al, sin_al, 0],
                    [-sin_al, cos_al, 0],
                    [0, 0, 1]])
    return DCM
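The three elementary rotations are typically chained into a full attitude matrix. The sketch below composes a classical 3-1-3 Euler sequence from them; the helper name euler313ToDCM and the test angles are illustrative assumptions, not something defined in the original code.

import numpy as np

def euler313ToDCM(phi, theta, psi):
    # Hypothetical helper: rotate first by phi about the 3rd axis, then by
    # theta about the 1st, then by psi about the 3rd again.
    return ROT3(psi) @ ROT1(theta) @ ROT3(phi)

dcm = euler313ToDCM(np.radians(30.0), np.radians(45.0), np.radians(60.0))

# The composite should still be a proper rotation matrix.
print(np.allclose(dcm @ dcm.T, np.eye(3)))   # expected: True
print(np.isclose(np.linalg.det(dcm), 1.0))   # expected: True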
[ "def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length\r\n # 1 or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n if len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.z):\r\n self._logger.warn('Wrong number of \"angles\" - I need {0}'.format(len(self.z)))\r\n # self.rotation_angle = 0.\r\n return\r\n\r\n z_rot = copy.copy(self.z)\r\n z_err_rot = copy.copy(self.z_err)\r\n\r\n for idx_freq in range(len(self.z)):\r\n\r\n angle = lo_angles[idx_freq]\r\n if np.isnan(angle):\r\n angle = 0.\r\n\r\n if self.z_err is not None:\r\n z_rot[idx_freq], z_err_rot[idx_freq] = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle,\r\n self.z_err[idx_freq, :, :])\r\n else:\r\n z_rot[idx_freq], z_err_rot = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle)\r\n\r\n self.z = z_rot\r\n if self.z_err is not None:\r\n self.z_err = z_err_rot\r\n\r\n # for consistency recalculate resistivity and phase\r\n self.compute_resistivity_phase()", "def rot3d(*args):\n return _seb.rot3d(*args)", "def rotate(self,alpha):\n\n alpha=alpha*(np.pi/180.0)\n return Point(self.x*np.cos(alpha)-self.y*np.sin(alpha),self.y*np.cos(alpha)+self.x*np.sin(alpha))", "def Rot3(t):\n return array([[cos(t),-sin(t),0.], [sin(t),cos(t),0.], [0.,0.,1.]]);", "def rotate(self, a=None):\r\n if a == None: return self * (-1)\r\n x = (self.x * math.cos(a)) - (self.y * math.sin(a))\r\n y = (self.x * math.sin(a)) + (self.y * math.cos(a))\r\n \r\n return Vector3(x,y,self.z)", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez)\n\n #these approximations are for compton scattering\n if (abs(beta) < 0.027):\n alpha = 0.0\n else:\n arg = ey/math.sin(beta)\n aarg = abs(arg)\n if (aarg < 1.0):\n alpha = math.asin(arg)\n else:\n arg = arg/(1.0001*aarg)\n sco1 = math.cos(alpha)*math.sin(beta) + ex\n sco1 = abs(sco1)\n sco2 = abs(ex)\n if (sco1 < sco2):\n beta = -beta\n alpha = -alpha\n gamma = 0.0\n # alpha, beta, gamma are the euler angles of rotation from the z-axis\n # to the direction of the initial particle.\n theta = angl\n rn1 = np.random.rand()\n phi = 2*math.pi*rn1\n\n # now calculate the roation matrix to rotate the scattered direction\n # back to the original axes.\n r11 = math.cos(alpha)*math.cos(beta)*math.cos(gamma) - math.sin(alpha)*math.sin(gamma)\n r12 = math.cos(beta)*math.sin(alpha)*math.cos(gamma) + math.cos(alpha)*math.sin(gamma)\n r13 = -math.sin(beta)*math.cos(gamma)\n r21 = -math.sin(gamma)*math.cos(beta)*math.cos(alpha) - math.sin(alpha)*math.cos(gamma)\n r22 = 
-math.sin(gamma)*math.cos(beta)*math.sin(alpha) + math.cos(alpha)*math.cos(gamma)\n r23 = math.sin(beta)*math.sin(gamma)\n r31 = math.sin(beta)*math.cos(alpha)\n r32 = math.sin(alpha)*math.sin(beta)\n r33 = math.cos(beta)\n sox = math.sin(theta)*math.cos(phi)\n soy = math.sin(theta)*math.sin(phi)\n soz = math.cos(theta)\n sx = r11*sox + r21*soy + r31*soz\n sy = r12*sox + r22*soy + r32*soz\n sz = r13*sox + r23*soy + r33*soz\n # sx, sy, sz is the unit propagation vector of the scattered particle\n # in the original fram.\n return sx, sy, sz", "def rotate(self, alpha):\r\n\r\n if self.tipper is None:\r\n self._logger.error('tipper array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length 1\r\n # or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n elif len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.tipper]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.tipper):\r\n self._logger.error('Wrong number Number of \"angles\" - need %ii ' % (len(self.tipper)))\r\n self.rotation_angle = 0.\r\n return\r\n\r\n tipper_rot = copy.copy(self.tipper)\r\n tipper_err_rot = copy.copy(self.tipper_err)\r\n\r\n for idx_freq in range(len(tipper_rot)):\r\n angle = lo_angles[idx_freq]\r\n\r\n if self.tipper_err is not None:\r\n tipper_rot[idx_freq], tipper_err_rot[idx_freq] = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle,\r\n self.tipper_err[idx_freq, :, :])\r\n else:\r\n tipper_rot[idx_freq], tipper_err_rot = \\\r\n rotatevector_incl_errors(self.tipper[idx_freq, :, :],\r\n angle)\r\n\r\n self.tipper = tipper_rot\r\n self.tipper_err = tipper_err_rot\r\n\r\n # for consistency recalculate mag and angle\r\n self.compute_mag_direction()\r\n\r\n # for consistency recalculate amplitude and phase\r\n self.compute_amp_phase()", "def rotation(self,t):\n pass", "def rotZ(alpha, N = 3):\n R = np.identity(N)\n R[0,0] = math.cos(alpha)\n R[0,1] = -math.sin(alpha)\n R[1,0] = math.sin(alpha)\n R[1,1] = math.cos(alpha)\n\n return R", "def rotor(theta):\n #return complex(np.cos(theta), -np.sin(theta))\n return np.cos(theta)*complex(1,0) - np.sin(theta)*complex(0,1)", "def adjust_heading_degrees(alpha):\n return mod(alpha + 180, 360) - 180", "def RotX90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][0] = 1.0\n rot[1][2] = 1.0\n rot[2][1] = -1.0\n return rot", "def asym_rotate(v):\n # Keep red the same\n rotate_green = 224 // 3\n v[:,:,:,1] = np.roll(v[:,:,:,1], rotate_green, axis = 1)\n \n rotate_blue = (2 * 224) // 3\n v[:,:,:,2] = np.roll(v[:,:,:,2], rotate_blue, axis = 1)\n \n return v", "def RotY90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][2] = -1.0\n rot[1][1] = 1.0\n rot[2][0] = 1.0\n return rot", "def rotz(delta):\n deltaRad = 
m.pi*delta/180;\n return np.array([[m.cos(deltaRad),-m.sin(deltaRad),0.],[m.sin(deltaRad),m.cos(deltaRad),0.],[0.,0.,1.]]);", "def angleAndAxisRotationFromQuaternion(*args):\n return _almathswig.angleAndAxisRotationFromQuaternion(*args)", "def get_rotation(a, b, c):\n return (b.x - a.x) * (c.y - b.y) - (b.y - a.y) * (c.x - b.x)", "def rot_axis():\n import numpy as np\n pi = np.pi\n delta = random.uniform(-1.,1.)\n delta = np.arccos(delta) # arc\n phi = random.uniform(-pi, pi) # rotation\n return delta, phi #radians...", "def rotation3D(X, y):\n alpha, beta, gamma = np.random.randint(0, 31, size=3)/180*np.pi\n Rx = np.array([[1, 0, 0],\n [0, np.cos(alpha), -np.sin(alpha)],\n [0, np.sin(alpha), np.cos(alpha)]])\n \n Ry = np.array([[np.cos(beta), 0, np.sin(beta)],\n [0, 1, 0],\n [-np.sin(beta), 0, np.cos(beta)]])\n \n Rz = np.array([[np.cos(gamma), -np.sin(gamma), 0],\n [np.sin(gamma), np.cos(gamma), 0],\n [0, 0, 1]])\n \n R = np.dot(np.dot(Rx, Ry), Rz)\n \n X_rot = np.empty_like(X)\n for channel in range(X.shape[-1]):\n X_rot[:,:,:,channel] = affine_transform(X[:,:,:,channel], R, offset=0, order=3, mode='constant')\n y_rot = affine_transform(y, R, offset=0, order=0, mode='constant')\n \n return X_rot, y_rot", "def test_make_r_ck(self):\n\n # invert angle\n res = np.dot(Rigid3D.make_r_ck([0.2, 0.4, 0.3, np.sqrt(0.71)]), \n Rigid3D.make_r_ck([-0.2, 0.4, 0.3, np.sqrt(0.71)]))\n np_test.assert_almost_equal(res, np.identity(3))\n\n # invert axis\n res = np.dot(Rigid3D.make_r_ck([0.5, 0.4, 0.6, np.sqrt(0.23)]), \n Rigid3D.make_r_ck([0.5, -0.4, -0.6, -np.sqrt(0.23)]))\n np_test.assert_almost_equal(res, np.identity(3))\n\n # no rot\n e = [1., 0, 0, 0]\n r_ck = Rigid3D.make_r_ck(e)\n euler = Rigid3D.extract_euler(r_ck, mode='zxz_ex_active')\n np_test.assert_almost_equal(euler[1], 0)\n\n # rot around x axis\n e = np.array([1., 1, 0, 0]) / np.sqrt(2)\n r_ck = Rigid3D.make_r_ck(e)\n euler = Rigid3D.extract_euler(r_ck, mode='zxz_ex_active')\n np_test.assert_almost_equal(euler, [0, np.pi/2, 0])\n\n # rot around x axis\n e = np.array([0.8, 0.6, 0, 0])\n r_ck = Rigid3D.make_r_ck(e)\n euler = Rigid3D.extract_euler(r_ck, mode='zxz_ex_active')\n np_test.assert_almost_equal(euler, [0, 2* np.arccos(0.8), 0])\n\n # rot around y axis\n e = np.array([1., 0, 1, 0]) / np.sqrt(2)\n r_ck = Rigid3D.make_r_ck(e)\n euler = Rigid3D.extract_euler(r_ck, mode='zyz_ex_active')\n np_test.assert_almost_equal(euler, [0, np.pi/2, 0])\n\n # rot around z axis\n e = np.array([0.6, 0, 0, 0.8])\n r_ck = Rigid3D.make_r_ck(e)\n euler = Rigid3D.extract_euler(r_ck, mode='zxz_ex_active')\n np_test.assert_almost_equal(euler[0] + euler[2], 2* np.arccos(0.6))\n np_test.assert_almost_equal(euler[1], 0)\n\n # arbitrary rot \n e = np.array([0.4, -0.5, -0.3, np.sqrt(0.52)])\n r_ck = Rigid3D.make_r_ck(e)\n np_test.assert_almost_equal(\n r_ck, \n [[-0.2, -0.2768882, -0.96111026],\n [ 0.8768882 , -0.52 , -0.03266615],\n [-0.48111026, -0.83266615, 0.34 ]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
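A quick numerical sanity check of the ROT3 document above (a sketch only; it assumes the ROT3 definition is in scope). With this sign convention the matrix is a passive z-axis rotation: it re-expresses a fixed vector in a frame rotated by alpha, i.e. the transpose of the usual "active" rotation matrix.

import numpy as np

# Rotating the frame by 90 degrees: the world x-axis reads as -y in the
# rotated frame.
v_world = np.array([1.0, 0.0, 0.0])
v_frame = ROT3(np.pi / 2) @ v_world
assert np.allclose(v_frame, [0.0, -1.0, 0.0])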
As the name says, count the number of wall collisions of n balls in t seconds in a box of dimensions 2s x 2s. To avoid division by zero, vx and vy are checked for being zero before use. The function counts the collisions for vx and vy separately, as all collisions are elastic.
def count_collisions(point_vector, n, k, t, s):
    num_of_collisions = 0
    point_col = []
    for i in range(n):
        curr_col = 0
        time_x, time_y = t, t
        x, y, vx, vy = point_vector[i]
        if vx != 0:
            if vx > 0:
                line_len = s - x
                time_x -= line_len / abs(vx)
            else:
                line_len = s + x
                time_x -= line_len / abs(vx)
            if time_x > 0:
                curr_col += 1 + (abs(vx) * time_x) // (2 * s)
        if vy != 0:
            if vy > 0:
                line_len = s - y
                time_y -= line_len / abs(vy)
            else:
                line_len = s + y
                time_y -= line_len / abs(vy)
            if time_y > 0:
                curr_col += 1 + (abs(vy) * time_y) // (2 * s)
        point_col.append(curr_col)
        num_of_collisions += curr_col
    return num_of_collisions, point_col
[ "def count_obstacles_in_my_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_enemy_portals():\n if portal.distance(elf) + portal.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count += 2 # portals are harder to kill so i consider them as 2 (in comperisson it wont matter)\n \n for mana_fountain in game.get_enemy_mana_fountains():\n if mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count +=1\n\n return count", "def count_obstacles_in_enemy_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_my_portals():\n if portal.distance(elf) + portal.distance(game.get_my_castle()) < elf.distance(game.get_my_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game.get_my_castle()) > elf.distance(game.get_my_castle()) - game.portal_size:\n \n count += 2 # portals are harder to kill so i consider them as 2 (in comperisson it wont matter)\n \n for mana_fountain in game.get_my_mana_fountains():\n if mana_fountain.distance(elf) + mana_fountain.distance(game.get_my_castle()) < elf.distance(game.get_my_castle()) + game.portal_size or \\\n mana_fountain.distance(elf) + mana_fountain.distance(game.get_my_castle()) > elf.distance(game.get_my_castle()) - game.portal_size:\n \n count +=1\n \n return count", "def compute_bodycollision_acceleration(i):\n\n axt = 0\n ayt = 0\n ri = r[i]\n #Collisions due to persons\n for j in range(n):\n if contact_p[i][j] != 0:\n kg = k * (ri + r[j] - contact_p[i][j])\n nx = x[0][i] - x[0][j]\n ny = x[1][i] - x[1][j]\n size_n = math.hypot(nx,ny)\n nx = nx / size_n\n ny = ny / size_n\n fx = kg * nx\n fy = kg * ny\n ax = fx / mass[i]\n ay = fy / mass[i]\n axt = axt + ax\n ayt = ayt + ay\n #Collisions due to walls\n for w in range(n_walls):\n if contact_w[i][w] != 0:\n kg = k * (ri - contact_w[i][w])\n #find normal direction to wall\n wall = walls[:,w]\n a = wall[0]\n b = wall[1]\n c = wall[2]\n wall_start = wall[3] - ri\n wall_end = wall[4] + ri\n nx,ny = wall[-2],wall[-1]\n\n fx = kg * nx\n fy = kg * ny\n ax = fx / mass[i]\n ay = fy / mass[i]\n axt = axt + ax\n ayt = ayt + ay\n\n return [axt, ayt]", "def detect_collisions(balls):\n n_balls = len(balls)\n world_min_x = -200.0*n_balls**.5 # minimum x in world coordinates\n world_max_x = +200.0*n_balls**.5 # maximum x in world coordinates\n world_min_y = -200.0*n_balls**.5 # minimum y in world coordinates\n world_max_y = +200.0*n_balls**.5 # maximum y in world coordinates\n set_of_collisions = set()\n\n set_of_collisions_2 = set()\n\n# for i in range(len(balls)):\n# b1 = balls[i]\n# for j in range(i):\n# b2 = balls[j]\n# if gas.colliding(b1, b2):\n# set_of_collisions_2.add(gas.ball_pair(b1, b2))\n\n cloumn_num = int(math.ceil(400 * n_balls**.5 / 256))\n squared_list = [[] for x in range(cloumn_num) for y in range(cloumn_num)]\n total_num = cloumn_num * cloumn_num\n for i in range(n_balls):\n x_pos = int(math.floor((balls[i].x - world_min_x) / 256))\n y_pos = int(math.floor((balls[i].y - world_min_y) / 256))\n squared_list[x_pos * cloumn_num + y_pos].append(balls[i])\n\n for i in range(len(squared_list)):\n for j in range(len(squared_list[i])):\n b1 = 
squared_list[i][j]\n for k in range(j):\n b2 = squared_list[i][k]\n if gas.colliding(b1, b2):\n set_of_collisions.add(gas.ball_pair(b1, b2))\n # if(i >= cloumn_num):\n # list_collisions(squared_list[i], squared_list[i - cloumn_num],set_of_collisions)\n if(i < total_num - cloumn_num):\n list_collisions(squared_list[i], squared_list[i + cloumn_num],set_of_collisions)\n # if i % cloumn_num > 0:\n # list_collisions(squared_list[i], squared_list[i - 1],set_of_collisions)\n if i % cloumn_num < cloumn_num - 1:\n list_collisions(squared_list[i], squared_list[i + 1],set_of_collisions)\n\n if i < total_num - cloumn_num and i % cloumn_num > 0:\n list_collisions(squared_list[i], squared_list[i + cloumn_num - 1],set_of_collisions)\n\n if i < total_num - cloumn_num and i % cloumn_num < cloumn_num - 1:\n list_collisions(squared_list[i], squared_list[i + cloumn_num + 1],set_of_collisions)\n\n\n #print \"set_of_collisions_2 \", len(set_of_collisions_2)\n #print \"set_of_collisions \", len(set_of_collisions)\n return set_of_collisions", "def collision_and_bounce(self):\n ball_upperleft = self.window.get_object_at(self.ball.x, self.ball.y)\n ball_upperright = self.window.get_object_at(self.ball.x + 2*BALL_RADIUS, self.ball.y)\n ball_lowerleft = self.window.get_object_at(self.ball.x ,self.ball.y+2*BALL_RADIUS)\n ball_lowerright = self.window.get_object_at(self.ball.x + 2*BALL_RADIUS,self.ball.y+2*BALL_RADIUS)\n\n if ball_upperleft is not None:\n if ball_upperleft is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_upperleft)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_upperleft is self.paddle:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_upperright is not None:\n if ball_upperright is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_upperright)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_upperright is self.paddle.x:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_lowerleft is not None:\n if ball_lowerleft is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_lowerleft)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_lowerleft is self.paddle:\n self.__dy = -INITIAL_Y_SPEED\n\n elif ball_lowerright is not None:\n if ball_lowerright is not self.paddle:\n self.__dy *= -1\n self.window.remove(ball_lowerright)\n self.how_many_bricks -= 1\n print(self.how_many_bricks)\n if ball_lowerright is self.paddle:\n self.__dy = -INITIAL_Y_SPEED", "def autoCollisions(self):\n total = 0\n for k in range(1, self.noJoints):\n contacts = self.p.getContactPoints(bodyA=k)\n for contact in contacts:\n if contact[2] != self.floor:\n total += contact[9]\n return total", "def get_number_of_intersections(t):\n\n global FiberIntersections\n \n FiberIntersections={} # FiberIntersections[id]=[abcissa of intersection]\n \n N_intersections=0\n \n for x in range(1,len(BOXS[0])):\n for y in range(1,len(BOXS)):\n if (BOXS[x][y] !=0):\n \n for xx in range(x-1,x+2):\n for yy in range(y-1,y+2):\n if (BOXS[xx][yy] !=0):\n \n for A,B,fib in BOXS[x][y]:\n for C,D,fib2 in BOXS[xx][yy]:\n if fib>fib2:\n n,px,py = lineIntersection(A,B,C,D)\n N_intersections += n\n \n if fib not in FiberIntersections:\n FiberIntersections[fib]=[]\n x0,y0 = FiberPts[t][fib][0]\n if abs(x0)<5 and abs(y0)<5:\n L=sqrt((x0-px)**2+(y0-py)**2)\n if L not in FiberIntersections[fib]:\n FiberIntersections[fib].append(L)\n\n if fib2 not in FiberIntersections:\n FiberIntersections[fib2]=[]\n x0,y0 = FiberPts[t][fib2][0]\n if abs(x0)<5 and abs(y0)<5:\n L=sqrt((x0-px)**2+(y0-py)**2)\n if 
L not in FiberIntersections[fib2]:\n FiberIntersections[fib2].append(L)\n\n return N_intersections, N_intersections*1.0/nb_fibers", "def collide(self):\n ball_y, ball_x = self.ball\n y1, y2, y3 = range(self.racquet_1[1], self.racquet_1[1] + self.pl_size)\n z1, z2, z3 = range(self.racquet_2[1], self.racquet_2[1] + self.pl_size)\n if ball_x == 1:\n if ball_y == y1:\n self.ball_dir = 0\n return 1\n elif ball_y == y2:\n self.ball_dir = 4\n return 1\n elif ball_y == y3:\n self.ball_dir = 1\n return 1\n else:\n return -10\n\n elif ball_x == self.matrix_size[1] - 2:\n if ball_y == z1:\n self.ball_dir = 2\n elif ball_y == z2:\n self.ball_dir = 5\n elif ball_y == z3:\n self.ball_dir = 3\n\n return 0", "def obstacle_count(self):\n\n # Setting up magic variables\n found_something = False # Trigger\n count = 0\n trigger_distance = 250\n\n # Writing down starting position for storage\n starting_position = self.get_heading()\n\n # Starting rotation for scanning\n self.right(primary=60, counter=60)\n\n # While loop for object scanning\n while self.get_heading() != starting_position:\n if self.read_distance() < trigger_distance and not found_something:\n found_something = True\n count += 1\n print(\"\\n Found something!\")\n elif self.read_distance() > trigger_distance and found_something:\n found_something = False\n print(\"\\n Seems I have a clear view, resetting trigger\")\n\n self.stop\n print(\"I found %d objects\" % count)\n return count", "def _count_balls(self) -> int:\n self.debug_log(\"Counting Balls\")\n balls = 0\n\n for device in self.machine.ball_devices.values():\n # skip playfields\n if device.is_playfield():\n continue\n if device.ball_count_handler.counter.config.get('ball_switches'):\n for switch in device.ball_count_handler.counter.config['ball_switches']:\n if self.machine.switch_controller.is_active(\n switch, ms=device.ball_count_handler.counter.config['entrance_count_delay']):\n balls += 1\n elif self.machine.switch_controller.is_inactive(\n switch, ms=device.ball_count_handler.counter.config['exit_count_delay']):\n continue\n else:\n raise ValueError(\"switches not stable\")\n elif 'trough' in device.tags:\n # special handling for troughs (needed for gottlieb)\n balls += device.balls\n\n return balls", "def _get_total_balls_in_devices(self) -> int:\n balls = 0\n # get count for all ball devices\n for device in self.machine.ball_devices.values():\n if device.is_playfield():\n continue\n\n balls += device.ball_count_handler.counter.count_balls_sync()\n return balls", "def _check_for_collisions(self):\n for atom_A in self.atoms:\n for atom_B in self.atoms:\n if atom_A != atom_B:\n if check_collision(atom_A, atom_B):\n\n if atom_A.type == \"ZERO\" or atom_B == \"ZERO\":\n self.atom_zero_collisions += 1\n\n if atom_A.type == \"ZERO\" and atom_B.type == \"HEALTHY\":\n atom_B.type = \"INFECTED\"\n atom_B.color = color[\"CARRIER\"]\n self.atom_zero_distances.append(\n self.ticks_between_collisions\n * atom_A.velocity_vector\n )\n\n elif atom_A.type == \"HEALTHY\" and atom_B.type == \"ZERO\":\n atom_A.type = \"INFECTED\"\n atom_A.color = color[\"CARRIER\"]\n self.atom_zero_distances.append(\n self.ticks_between_collisions\n * atom_B.velocity_vector\n )\n\n # If atom zero collided, reset tick between collisions counter\n self.ticks_between_collisions = 0\n\n self._collide(atom_A, atom_B)", "def lavabox_collide(ball):\n \n #If the ball hits mybox\n if ball.pos.z > (mybox.pos.z - 1.5) and ball.pos.z < (mybox.pos.z + 1.5) and ball.pos.x > (mybox.pos.x - 1.5) and ball.pos.x < (mybox.pos.x + 
1.5):\n ball.vel.z *= -1.0\n ball.vel.x *= -1.0\n \n if ball.pos.z > (mybox1.pos.z - 1.5) and ball.pos.z < (mybox1.pos.z + 1.5) and ball.pos.x > (mybox1.pos.x - 1.5) and ball.pos.x < (mybox1.pos.x + 1.5):\n ball.vel.z *= -1.0\n ball.vel.x *= -1.0\n \n if ball.pos.z > (mybox2.pos.z - 1.5) and ball.pos.z < (mybox2.pos.z + 1.5) and ball.pos.x > (mybox2.pos.x - 1.5) and ball.pos.x < (mybox2.pos.x + 1.5):\n ball.vel.z *= -1.0\n ball.vel.x *= -1.0\n \n if ball.pos.z > (mybox3.pos.z - 1.5) and ball.pos.z < (mybox3.pos.z + 1.5) and ball.pos.x > (mybox3.pos.x - 1.5) and ball.pos.x < (mybox3.pos.x + 1.5):\n ball.vel.z *= -1.0\n ball.vel.x *= -1.0\n \n if ball.pos.z > (mybox4.pos.z - 1.5) and ball.pos.z < (mybox4.pos.z + 1.5) and ball.pos.x > (mybox4.pos.x - 1.5) and ball.pos.x < (mybox4.pos.x + 1.5):\n ball.vel.z *= -1.0\n ball.vel.x *= -1.0", "def CountNeighbours(self):\r\n idx = lambda c: -int(c==0) + int(c==self.size-1)\r\n deltas = { -1:[0, 1], 0:[-1, 0, 1], 1:[-1, 0] }\r\n ## localize instance vars local for speed (gains +1 fps)\r\n size = self.size\r\n grid = self.grid\r\n count = self.count\r\n ## #\r\n for y in xrange(size):\r\n for x in xrange(size):\r\n count[y][x] = 0\r\n vertical = deltas[idx(y)]\r\n horizontal = deltas[idx(x)]\r\n for dy in vertical:\r\n for dx in horizontal:\r\n if (dy != 0) or (dx != 0):\r\n count[y][x] += grid[y+dy][x+dx]", "def ball_collisions(self):\n up_l_corner = self.window.get_object_at(self.ball.x, self.ball.y)\n up_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n down_l_corner = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.height)\n down_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)\n\n # The situation that the ball hits the paddle.\n if down_l_corner == self.paddle:\n self.__dy = self.reverse_dy\n elif down_r_corner == self.paddle:\n self.__dy = self.reverse_dy\n\n # The situation that the ball hits bricks and remove them.\n if up_l_corner is not None and up_l_corner is not self.paddle and up_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif up_r_corner is not None and up_r_corner is not self.paddle and up_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_l_corner is not None and down_l_corner is not self.paddle and down_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_r_corner is not None and down_r_corner is not self.paddle and down_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)", "def pingpong_bounce_count(n):\n return [sum(steps) for steps in [pingpong_bounce(k) for k in range(1,n+1)]]", "def obstacle_count(self):\n # Gotten from the discord server\n # do a scan of the area in front of the robot\n self.scan()\n # FIGURE OUT HOW MANY OBSTACLES THERE WERE\n see_an_object = False\n count = 0\n\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object:\n see_an_object = True\n count += 1\n print(\"~~~~ I SEE SOMETHING!!! 
~~~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\")\n\n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n print(\"\\nI saw %d objects\" % count)", "def check_walls(self, aim):\n k = 100\n points = []\n dy = (aim.y - self.y) / k\n dx = (aim.x - self.x) / k\n for i in range(k):\n points.append([self.x + math.ceil(i * dx), self.y + math.ceil(i * dy)])\n for i in self.cells:\n for j in i:\n for k in points:\n if (k[0] - j[0] > 0) and (k[0] - j[0] < self.cell_size) and (k[1] - j[1] > 0) and (\n k[1] - j[1] < self.cell_size):\n if j[2] == -1:\n return 1\n return 0", "def particle_detection_perf(actual, detected):\n\n # calculate the number of particles in the 'truth' image\n n_particles = np.sum(actual)\n\n # calculate the number of valid particles that have been detected\n # there are ones at each particle location and zeros elsewhere,\n # so the product will only be 1 where an actual particle is detected,\n # and will be 0 elsewhere\n n_detect_valid = np.sum(detected * actual)\n\n # calculate the number of particles which are 'detected' but are not\n # actually particles\n n_detect_invalid = np.sum(detected - actual == 1)\n\n # calculate the number of particles which are 'detected' but are not\n # actually particles\n n_undetected = np.sum(detected - actual == -1)\n\n return n_particles, n_detect_valid, n_detect_invalid, n_undetected" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
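A minimal usage sketch for the count_collisions document above (the values are invented and the unused k argument is passed as None). Per axis, the first wall hit comes after (s - x) / |vx| seconds and each later bounce comes every 2*s / |vx| seconds, which is exactly what the 1 + (|vx| * remaining_time) // (2*s) term counts.

import numpy as np

# One ball at the origin moving right at 1 unit/s in a box spanning [-5, 5]:
# first wall hit after 5 s, then one bounce every 2*s = 10 s, so 12 s of
# flight gives 1 + (1 * 7) // 10 = 1 collision.
points = np.array([[0.0, 0.0, 1.0, 0.0]])  # columns: x, y, vx, vy
total, per_ball = count_collisions(points, n=1, k=None, t=12, s=5)
assert total == 1 and per_ball == [1]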
Calculates "the beginning of time" by taking the average time it takes for all points to travel back to their origin and rounding the result
def calculate_the_beginning(point_vector, n):
    avg_time = 0
    for i in range(n):
        s = np.sqrt(point_vector[i, 0] ** 2 + point_vector[i, 1] ** 2)
        v = np.sqrt(point_vector[i, 2] ** 2 + point_vector[i, 3] ** 2)
        avg_time += s / v
    avg_time /= n
    return round(avg_time)
[ "def average_time(self):\n return int((sum(self.times) / max(1, len(self.times))) * 1000)", "def total_time(self):\n t = timedelta()\n for step in self.steps:\n if ('time' in step):\n t += self.parsetime(step['time'])\n return(t)", "def get_travel_time_in_min(self):\n if len(self.step_list) != 0:\n duration = sum([x.duration for x in self.step_list], start=timedelta())\n return duration.seconds // 60\n else:\n return None", "def duration(self):\n return self.points[self.end].time - self.points[self.start].time", "def get_avg_time_spent():\n curr_step_start = {}\n curr_step = {}\n avg_times = {}\n for s in STREAMS:\n user, step, time, enter = s\n\n if not enter: # exiting. collect time for it\n time_taken = time - curr_step_start[user]\n if step not in avg_times:\n count = 1\n else: # have seen this step. acccumulate\n time_taken += avg_times[step][0] * avg_times[step][1]\n count = avg_times[step][1] + 1\n avg_times[step] = (time_taken / count, count)\n\n # where is my user?\n curr_step[user] = step if enter else None\n curr_step_start[user] = time\n\n print(\"avg_time:\", avg_times)\n print(\"cur_step:\", curr_step)", "def getMeanSpeed(self):\n distance = self.getTraveledDistance()\n time = self.getTraveledTime()\n # escape from division by zero\n if eq(time,0):\n time = 1\n return distance / time", "def mean_lap_time(self):\n if not self.laptimes:\n return np.nan\n return (self.laptimes[-1] - self.timestamp_start).total_seconds() \\\n / float(len(self.laptimes))", "def average_time_per_setpoint(self):\n all_setpoint_times = []\n current_setpoint_time = 0\n for i, row in enumerate(self.log_rows[:-1]): # ignore the last entry, since we can't know how long that one took (there is no next row to check the timestamp of)\n current_timestamp = row[self.std_col_map[Headers.TIMESTAMP]]\n next_timestamp = self.log_rows[i+1][self.std_col_map[Headers.TIMESTAMP]]\n current_setpoint_time += (next_timestamp - current_timestamp).total_seconds()\n if row[self.std_col_map[Headers.RESULT]]:\n all_setpoint_times.append(current_setpoint_time)\n current_setpoint_time = 0\n return np.average(all_setpoint_times)", "def travel_time(self, time):\n floors = self.generate_floor_selections(time)\n t_time = 0\n cur_floor = 1 \n for i in range(len(floors)):\n floor_diff = floors[i] - cur_floor\n t_time += TRAVEL_TIMES[floor_diff]\n cur_floor = floors[i]\n return t_time", "def _calculate_runtime(self):\n\n _time = 0\n for _, _passes, _captures in self._batches:\n for cap in _captures:\n _time_temp = ((cap.duration * _passes))\n\n if cap.focused:\n _nmacs = len(cap.macs)\n _deg, _dur = cap.focused\n _time_fine = (_deg * _dur) / 360\n _time_fine *= _nmacs\n _time_temp += _time_fine\n\n _time += _time_temp\n\n return datetime.timedelta(seconds=_time)", "def calcTravelTime(p1, p2, speed):\n return np.linalg.norm(p2 - p1) / speed", "def totalTime(self):\n return time.time()-self.start", "def getTraveledTime(self):\n return abs(self.arrival - self.departure)", "def time_update(self):\r\n self.time = []\r\n t = [0] + self.time_final_all_section()\r\n for i in range(self.number_of_section):\r\n self.time.append((t[i+1] - t[i]) / 2.0 * self.tau[i]\r\n + (t[i+1] + t[i]) / 2.0)\r\n return np.concatenate([i for i in self.time])", "def _get_avg_runtime(self):\n run_time_total = 0\n for run_time in self._run_times:\n run_time_total = run_time_total + run_time\n\n return int(run_time_total / len(self._run_times))", "def calculate_point_speed(p1: geo.Point, p2: geo.Point) -> float:\n s = geo.gps_distance((p1.lat, p1.lon), (p2.lat, 
p2.lon))\n t = abs(p2.time - p1.time)\n try:\n return s / t\n except ZeroDivisionError:\n return float(\"inf\") if 0 <= s else float(\"-inf\")", "def wait_time_average(self):\n return self.time_average(self.elapsed_data['elapsed_time'], self.elapsed_data['queue'])", "def time(self):\n return ((self['clock']['initial'] + 40 * self['clock']['increment'])\n / 60)", "def get_time_step(time_array):\n if len(time_array) > 1:\n return abs(time_array[1] - time_array[0])\n else:\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
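A small worked check of calculate_the_beginning above (a sketch with invented numbers): points at distances 3 and 5 from the origin moving at speed 1 need 3 s and 5 s to get back, so the rounded average is 4.

import numpy as np

pts = np.array([[3.0, 0.0, 1.0, 0.0],   # 3 units away at speed 1 -> 3 s
                [0.0, 5.0, 0.0, 1.0]])  # 5 units away at speed 1 -> 5 s
assert calculate_the_beginning(pts, 2) == 4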
Check CloudFormation Ref/GetAtt for Conditions
def match(self, cfn):
    matches = []

    # Start with Ref checks
    ref_objs = cfn.search_deep_keys('Ref')
    for ref_obj in ref_objs:
        value = ref_obj[-1]
        if value not in PSEUDOPARAMS:
            scenarios = cfn.is_resource_available(ref_obj, value)
            for scenario in scenarios:
                scenario_text = ' and '.join(
                    ['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
                message = 'Ref to resource "{0}" that may not be available {1} at {2}'
                matches.append(
                    RuleMatch(
                        ref_obj[:-1],
                        message.format(
                            value, scenario_text, '/'.join(map(str, ref_obj[:-1])))))

    # Then do GetAtt
    getatt_objs = cfn.search_deep_keys('Fn::GetAtt')
    for getatt_obj in getatt_objs:
        value_obj = getatt_obj[-1]
        value = None
        if isinstance(value_obj, list):
            value = value_obj[0]
        elif isinstance(value_obj, six.string_types):
            value = value_obj.split('.')[0]
        if value:
            if value not in PSEUDOPARAMS:
                scenarios = cfn.is_resource_available(getatt_obj, value)
                for scenario in scenarios:
                    scenario_text = ' and '.join(
                        ['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
                    message = 'GetAtt to resource "{0}" that may not be available {1} at {2}'
                    matches.append(
                        RuleMatch(
                            getatt_obj[:-1],
                            message.format(
                                value, scenario_text, '/'.join(map(str, getatt_obj[:-1])))))

    return matches
[ "def _check_rule_has_attribute(self, data_sources, conditions):\n return hasattr(data_sources['asset'], conditions['attribute']) and \\\n getattr(data_sources['asset'], conditions['attribute']) is not None", "def is_ca_external(self, obj_dict):\n return (self.endpoint == objects.get_singular(\n objects.CUSTOM_ATTRIBUTES) and\n obj_dict[\"definition_type\"] in objects.ALL_SINGULAR_DISABLED_OBJS)", "def retrieveCondition(self):\n return True", "def test_attribute_effective_from_in_attributes(self):\n self._load_template_database()\n nrth_bnd_api = api.build()\n tmp_mxn = nrth_bnd_api.registry.get_category(\"/silver/\", None)\n self.entity.mixins = [tmp_mxn]\n del self.entity.attributes[\"occi.agreement.effectiveFrom\"]\n self.assertRaises(AttributeError, self.agree_back.create, self.entity,\n self.extras)", "def test_that_required_attributes_are_used(self):\n # m3 has required attributes \n self.entity.mixins.append(test_data.m3)\n\n self.entity.attributes = {\"occi.agreement.effectiveFrom\": \"14001245\",\n \"os\": \"ubuntu\", \"vm_cores\": \"4\"}\n self.assertRaises(AttributeError, self.agree_back.create, self.entity,\n self.extras)\n LOG.info(\"Agreement ensures use of required variables\")", "def check_property(instance, attr, reference):\n attr_val = getattr(instance, attr)\n return check_value(attr_val, reference=reference)", "def is_condition(instance, value, property_name, **_):\n value = resolve_permission_variable(value)\n property_value = get_deep_attr(instance, property_name)\n return value == property_value", "def has_ref(self, prop, itis, claim):\n if claim.sources:\n for i, source in enumerate(claim.sources):\n if prop in source:\n for s in source[prop]:\n if self.bypass_redirect(s.getTarget()) == itis:\n return True\n return False", "def test_resource_assignment_resource_get_available_attribute_types_for_asset_get(self):\n pass", "def attribute_check(obj, attribute):\n\n check_node(obj)\n\n dep_node = get_depend_node(obj)\n dep_fn = maya.api.OpenMaya.MFnDependencyNode()\n dep_fn.setObject(dep_node)\n return dep_fn.hasAttribute(attribute)", "def checkISReference(self, container: 'SoFieldContainer', fieldname: 'SbName', readok: 'SbBool &') -> \"SbBool\":\n return _coin.SoInput_checkISReference(self, container, fieldname, readok)", "def test_ruleset_condition_reference_elements(self):\n\n validation_result = validify.validate(\"validify/tests/test_rule_condition_reference_elements.xml\", validation_rules=compile_test_rules(), log_to_console=False)\n assert len(validation_result) == 0", "def hasAttribute(*args, **kwargs):\n \n pass", "def cfcheck(**das):\n return True", "def validate_generic_object_attribute(self, obj, attr):\n if attr == \"statuses\":\n return True\n\n for ob in self._validator.get_entities(): \n #try to find the matching attribute\n for attribute in ob.get_attributes():\n if attribute.get_attribute_name() == attr:\n return True\n \n #try to find the matching action\n for action in ob.get_actions():\n if action.get_action_name() == attr:\n return True\n return False", "def test_get_attr(self):\r\n\r\n self.stack = parser.Stack(self.ctx, 'test_get_attr',\r\n template.Template(self.hot_tpl))\r\n self.stack.store()\r\n self.stack.create()\r\n self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),\r\n self.stack.state)\r\n\r\n rsrc = self.stack[self.resource_name]\r\n for action, status in (\r\n (rsrc.CREATE, rsrc.IN_PROGRESS),\r\n (rsrc.CREATE, rsrc.COMPLETE),\r\n (rsrc.RESUME, rsrc.IN_PROGRESS),\r\n (rsrc.RESUME, rsrc.COMPLETE),\r\n (rsrc.UPDATE, 
rsrc.IN_PROGRESS),\r\n (rsrc.UPDATE, rsrc.COMPLETE)):\r\n rsrc.state_set(action, status)\r\n\r\n resolved = function.resolve(self.stack.t.parse(self.stack,\r\n self.snippet))\r\n self.assertEqual(self.expected, resolved)", "def test_attribute_effective_Until_in_attributes(self):\n self._load_template_database()\n nrth_bnd_api = api.build()\n tmp_mxn = nrth_bnd_api.registry.get_category(\"/silver/\", None)\n self.entity.mixins = [tmp_mxn]\n del self.entity.attributes[\"occi.agreement.effectiveUntil\"]\n self.assertRaises(AttributeError, self.agree_back.create, self.entity,\n self.extras)", "def test_attribute_doc_in_json(hlwm, clsname, object_path, json_doc):\n path = object_path(hlwm)\n\n for _, attribute in json_doc['objects'][clsname]['attributes'].items():\n name = attribute['name']\n help_txt = hlwm.call(['help', f'{path}.{name}'.lstrip('.')]).stdout\n help_lines = help_txt.rstrip().split('\\n')\n doc_in_help = ''\n # the doc is everything after 'Current value: ..'\n for line_idx in range(0, len(help_lines) - 1):\n # a line starting with 'Current value: '\n found = help_lines[line_idx].startswith('Current value: ')\n # and the next line is empty:\n found = found and help_lines[line_idx + 1] == ''\n if found:\n # the doc is everything after the empty line:\n doc_in_help = '\\n'.join(help_lines[line_idx + 2:])\n break\n if not doc_in_help.startswith('Current value:'):\n # if there is a doc printed by 'help', then it\n # should also be present in the json:\n assert doc_in_help == attribute.get('doc', '').rstrip()", "def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecStorageVolumeClaimTemplateStatusConditionsArgs']]]]:\n return pulumi.get(self, \"conditions\")", "def __attributeExists(self, Visum, attributeName):\r\n\t\r\n for attr in Visum.Net.Zones.Attributes.GetAll:\r\n\t if str(attr.ID).upper() == attributeName.upper():\r\n\t\treturn True\r\n\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
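For context, the situation the match method above reports looks roughly like the following template fragment, written here as a Python dict purely for illustration (the resource, condition and parameter names are invented): an unconditional Ref pointing at a resource that only exists when its condition is true.

template = {
    "Conditions": {"CreateBucket": {"Fn::Equals": [{"Ref": "Env"}, "prod"]}},
    "Resources": {
        "Bucket": {"Type": "AWS::S3::Bucket", "Condition": "CreateBucket"},
        "Topic": {
            "Type": "AWS::SNS::Topic",
            # "Bucket" is never created when CreateBucket is false, so this
            # Ref may point at a resource that is not available.
            "Properties": {"DisplayName": {"Ref": "Bucket"}},
        },
    },
}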
Objects should not be automatically associated with a particular site when ``PHOTOLOGUE_MULTISITE`` is ``True``.
def test_auto_add_sites(self):
    with self.settings(PHOTOLOGUE_MULTISITE=False):
        gallery = GalleryFactory()
        photo = PhotoFactory()
        self.assertEqual(list(gallery.sites.all()), [self.site1])
        self.assertEqual(list(photo.sites.all()), [self.site1])
    photo.delete()

    with self.settings(PHOTOLOGUE_MULTISITE=True):
        gallery = GalleryFactory()
        photo = PhotoFactory()
        self.assertEqual(list(gallery.sites.all()), [])
        self.assertEqual(list(photo.sites.all()), [])
    photo.delete()
[ "def is_singleton(item):\n return isinstance(item, Item) and not item.album_id", "def check_magento_structure(self):\n for backend in self:\n websites = backend.website_ids\n if not websites:\n backend.synchronize_metadata()\n return True", "def test_exclude_uwum_app(self):\n socialapp_1 = SocialApp.objects.create(\n provider='facebook',\n name='Facebook',\n client_id='xxxxxxxxxxxxxxxxxx',\n secret='xxxxxxxxxxxxxxxxxx',\n key=''\n )\n socialapp_2 = SocialApp.objects.create(\n provider='twitter',\n name='Twitter',\n client_id='xxxxxxxxxxxxxxxxxx',\n secret='xxxxxxxxxxxxxxxxxx',\n key=''\n )\n socialapp_3 = SocialApp.objects.create(\n provider='uwum',\n name='UWUM',\n client_id='xxxxxxxxxxxxxxxxxx',\n secret='',\n key=''\n )\n\n socialapps = wegovnow.exclude_uwum_app(get_social_apps())\n\n self.assertTrue(socialapp_1 in socialapps)\n self.assertTrue(socialapp_2 in socialapps)\n self.assertFalse(socialapp_3 in socialapps)", "def set_sites(self, request):\n if settings.CMS_PERMISSION:\n self.sites = get_user_sites_queryset(request.user) \n else:\n self.sites = Site.objects.all()\n self.has_access_to_multiple_sites = len(self.sites) > 1", "def do_default_site(self, using=DEFAULT_DB):\r\n\r\n if not len(self.sites.all()):\r\n sites = Site.objects.all()\r\n if hasattr(sites, 'using'):\r\n sites = sites.using(using)\r\n self.sites.add(sites.get(pk=settings.SITE_ID))\r\n return True\r\n\r\n return False", "def singleton():\n return site.Site.get_or_insert('site')", "def testShareDuplicatePhotos(self):\n share_list = [{'existing_episode_id': self._episode_id2,\n 'new_episode_id': self._existing_ep_id,\n 'photo_ids': self._photo_ids2}]\n self._tester.ShareExisting(self._cookie, self._existing_vp_id, share_list)\n self._tester.ShareExisting(self._cookie, self._existing_vp_id, share_list)", "def test_anonymousSite(self):\n resource = self.store.findUnique(AnonymousSite)\n self.assertEqual(list(self.store.interfacesFor(resource)),\n [IResource, IMantissaSite, IWebViewer])\n self.assertIdentical(installedOn(resource), self.store)\n self.assertIdentical(resource.loginSystem, IRealm(self.store))", "def is_site_pin_for(self, site, bel_pin_index):\n # BEL pins are not site pins for other BEL pins.\n return False", "def _validate_site_addability(site):\n if not isinstance(site, Atom):\n raise TypeError(\"Argument {} is not a Site. 
See gmso/core/atom.py\")\n # TODO: Some sort of a check on site.parent\n return site", "def ignore_object(self, object_name, strict=False):\n if object_name in (EXPERIMENT, NEIGHBORS,):\n return True\n if strict and self.objects_choice == O_NONE:\n return True\n if strict and self.objects_choice == O_SELECT and object_name != \"Image\":\n return object_name not in self.objects_list.selections\n return False", "def wrist_site(self):\n raise NotImplementedError", "def graph_object_is_unmanaged_asset(graph_obj: Dict) -> bool:\r\n return graph_obj.get(\"type\") == \"vm\" and graph_obj.get(\"id\", '').startswith(\"ip:\")", "def create_social_platform_duplicates_influencer_checks():\n s = suspicions.SuspectDuplicateSocial()\n plat_names = models.Platform.SOCIAL_PLATFORMS_CRAWLED\n\n for name in plat_names:\n s.report_all(name)", "def disable_static_site(client, args):\n parser = argparse.ArgumentParser(PLUGIN_BASE+' du')\n\n parser.add_argument('bucket', metavar='BUCKET', type=str, nargs='?',\n help=\"The bucket to disable static site for.\")\n\n parsed = parser.parse_args(args)\n\n # get the bucket\n try:\n bucket = client.get_bucket(parsed.bucket)\n except S3ResponseError:\n print('No bucket named '+parsed.bucket)\n sys.exit(2)\n\n # make the site\n bucket.delete_website_configuration()\n print('Website configuration deleted for {}'.format(parsed.bucket))", "def test_disabled_feature_forbidden_update_local_site(self):\n self._test_method(\n 'put', False, dummy=123,\n local_site=LocalSite.objects.get(name='local-site-1'))", "def sites(self):\n projection = ['name', 'onload']\n # TODO onload for local is true\n site_id = config.get_item('site', 'id')\n if self.public_request or self.is_true('all'):\n sites = list(config.db.sites.find(None, projection))\n else:\n # TODO onload based on user prefs\n remotes = (config.db.users.find_one({'_id': self.uid}, ['remotes']) or {}).get('remotes', [])\n remote_ids = [r['_id'] for r in remotes] + [site_id]\n sites = list(config.db.sites.find({'_id': {'$in': remote_ids}}, projection))\n for s in sites: # TODO: this for loop will eventually move to public case\n if s['_id'] == site_id:\n s['onload'] = True\n break\n return sites", "def unique(self):\r\n if self.id or self.process:\r\n return self.process.slug == \"upload-metadata-unique\"\r\n\r\n # If no info, consider this true by default\r\n return True", "def _generate_non_autopayable_entities(self):\n self.non_autopay_account = generator.billing_account(\n web_user_creator=generator.arbitrary_web_user(is_dimagi=True),\n web_user_contact=self.autopay_user\n )\n self.non_autopay_domain = generator.arbitrary_domain()\n # Non-autopay subscription has same parameters as the autopayable subscription\n self.non_autopay_subscription = generator.generate_domain_subscription(\n self.non_autopay_account,\n self.non_autopay_domain,\n date_start=self.subscription.date_start,\n date_end=add_months_to_date(self.subscription.date_start, self.subscription_length),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to return the calibrated values; this is done once the calibration is complete
def Calibrated(self):
    peaklist = self.PeakValues.copy()
    try:
        peaklist = np.array(peaklist) * self.k
        return peaklist
    except Exception as E:
        raise E
[ "def perform_ground_calibration(self): \n zero = 0\n noise = 0\n #TODO implement\n return zero, noise", "def get_camera_calibration_values():\n\tcalibration_images = glob.glob('./camera_cal/calibration*.jpg')\n\treturn __calibrate_camera(calibration_images)", "def getCalibratedSensorData(self):\n\n if self.state != '#oscb': self.__setState('#oscb')\n self.bus.write(\"#f\")\n output = self.bus.read(36)\n self.__update(output)\n\n return output", "def test_get_calibrated_values():\n _setup()\n\n values = as7262.get_calibrated_values()\n\n # Deal with floating point nonsense\n values = [round(x, 1) for x in values]\n\n assert values == CALIBRATED_VALUES", "def update(self):\n if self._calib_fcn is None:\n raise TypeError('Calibration function not set')\n if self._calib is None:\n if self._calib_orig is None:\n raise TypeError('Calibration object not set')\n else:\n self.calib = self._calib_orig\n\n self.data, self.units = self.calib_fcn(self.calib) # pylint: disable=not-callable", "def get_calibration(self):\n # Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.\n self._config_mode()\n # Read the 22 bytes of calibration data and convert it to a list (from\n # a bytearray) so it's more easily serialized should the caller want to\n # store it.\n cal_data = list(self._read_bytes(ACCEL_OFFSET_X_LSB_ADDR, 22))\n # Go back to normal operation mode.\n self._operation_mode()\n return cal_data", "def calibrate(self):\n\n # calibrate the eye tracker in five steps\n print \"CENTER HORIZONTAL\"\n self.center_horizontal()\n\n print \"CENTER VERTICAL\"\n self.center_vertical()\n\n print \"CENTER DEPTH\"\n self.center_depth_faster()\n\n #print \"ALIGN PUPIL AND CR\"\n #self.align_pupil_and_CR()\n\n print \"FIND PUPIL RADIUS\"\n self.find_pupil_radius()\n\n print \"d = \", self.d\n print \"Rp[mm] = \", self.Rp_mm", "def calibrating(self):\n self.current = State.CALIBRATING", "def get_values(self):\n data_raw = self.poll_data()\n\n data_processed = [self.det_from_acq_dev[acq_dev].process_data(d)\n for acq_dev, d in data_raw.items()]\n data_processed = np.concatenate(data_processed)\n if self.correlated:\n if not self.detectors[0].get_values_function_kwargs.get(\n 'averaged', True):\n data_for_corr = data_processed\n else:\n data_for_corr = np.concatenate([d for d in data_raw.values()])\n corr_data = self.get_correlations_classif_det(data_for_corr)\n data_processed = np.concatenate([data_processed, corr_data], axis=0)\n\n return data_processed", "def getTotalCalibration(self):\n return len(self.map_df.loc[self.map_df['TYPE'] == PointType.calibrated])", "def refresh_calibrationImage(self):\n cal = self.constraints[self.calibrating_color]\n hn = cal['h_min']\n hx = cal['h_max']\n sn = cal['s_min']\n sx = cal['s_max']\n vn = cal['v_min']\n vx = cal['v_max']\n\n def fill(h, img):\n for y in range(0, 256):\n v = (vx - vn) * y / 256 + vn\n for x in range(0, 256):\n s = (sx - sn) * x / 256 + sn\n\n img[y, x, 0] = h\n img[y, x, 1] = s\n img[y, x, 2] = v\n\n fill(hn, self.swatchn)\n fill(hx, self.swatchx)\n self.swatchn = cv.cvtColor(self.swatchn, cv.COLOR_HSV2BGR)\n self.swatchx = cv.cvtColor(self.swatchx, cv.COLOR_HSV2BGR)\n hor = np.hstack((self.swatchn, self.swatchx))\n cv.imshow(\"cal\", hor)", "def calib_reader(filename):\r\n os.chdir(r'C:\\Users\\jensj\\OneDrive\\Skrivebord\\Beregninger\\Calculated\\Calibs')\r\n txt = []; U_cal = []; V_cal = [];\r\n infile = open(filename,'r')\r\n for line in infile:\r\n txt = line.split()\r\n U_cal.append(float(txt[0]))\r\n 
V_cal.append(float(txt[1]))\r\n infile.close()\r\n return(U_cal,V_cal)", "def _load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the rigid transformation from IMU to velodyne\n data[\"T_velo_imu\"] = self._load_calib_rigid(\"calib_imu_to_velo.txt\")\n\n # Load the camera intrinsics and extrinsics\n data.update(self._load_calib_cam_to_cam(\"calib_velo_to_cam.txt\", \"calib_cam_to_cam.txt\"))\n\n # Pre-compute the IMU to rectified camera coordinate transforms\n data[\"T_cam0_imu\"] = data[\"T_cam0_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam1_imu\"] = data[\"T_cam1_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam2_imu\"] = data[\"T_cam2_velo\"].dot(data[\"T_velo_imu\"])\n data[\"T_cam3_imu\"] = data[\"T_cam3_velo\"].dot(data[\"T_velo_imu\"])\n\n return data", "def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if self.red_glyphs.mlab_source.scalars[i] != self.hi_color:\n print(i)\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n corr=corr*n/cr_sum**2.-1.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr", "def calibration(self):\n try:\n backend_name = self.configuration['name']\n calibrations = self._api.backend_calibration(backend_name)\n # FIXME a hack to remove calibration data that is none.\n # Needs to be fixed in api\n if backend_name == 'ibmqx_hpc_qasm_simulator':\n calibrations = {}\n # FIXME a hack to remove calibration data that is none.\n # Needs to be fixed in api\n if backend_name == 'ibmqx_qasm_simulator':\n calibrations = {}\n except Exception as ex:\n raise LookupError(\n \"Couldn't get backend calibration: {0}\".format(ex))\n\n calibrations_edit = {}\n for key, vals in calibrations.items():\n new_key = _snake_case_to_camel_case(key)\n calibrations_edit[new_key] = vals\n\n return calibrations_edit", "def calibrate(self):\n if not self._calibrate:\n raise ValueError(\"calibrate parameter must be set\")\n\n if self._calibration_samples >= self._max_samples:\n return self._camera\n\n frame = self.capture()\n\n if self._last_timestamp is None:\n self._last_timestamp = frame.timestamp\n\n if (frame.timestamp - self._last_timestamp).total_seconds() > self._frame_delay:\n ret, corners = frame.images[0].features\n if ret is True:\n self._objpoints.append(self._objp)\n self._imgpoints.append(corners)\n\n self._calibration_samples += 1\n self._last_timestamp = frame.timestamp\n\n if self._calibration_samples >= self._max_samples:\n img = frame.images[0].image\n shape = img.shape[::-1]\n self._camera = self._finish_calibration(self._objpoints, self._imgpoints, shape)\n return self._camera", "def _load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(\n self.base_path, 'calib/{}.txt'.format(self.sequence))\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = 
np.reshape(filedata['P3'], (3, 4))\n\n data['P_rect_00'] = P_rect_00\n data['P_rect_10'] = P_rect_10\n data['P_rect_20'] = P_rect_20\n data['P_rect_30'] = P_rect_30\n\n # Create 4x4 matrices from the rectifying rotation matrices\n R_rect_00 = np.eye(4)\n R_rect_00[0:3, 0:3] = np.reshape(filedata['R_rect'], (3, 3))\n data['R_rect_00'] = R_rect_00\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # # Compute the velodyne to rectified camera coordinate transforms\n T_cam_velo = filedata['Tr_velo_cam'].reshape((3, 4))\n T_cam_velo = np.vstack([T_cam_velo, [0, 0, 0, 1]])\n data['T_cam0_velo'] = R_rect_00.dot(T_cam_velo)\n data['T_cam1_velo'] = T1.dot(R_rect_00.dot(T_cam_velo))\n data['T_cam2_velo'] = T2.dot(R_rect_00.dot(T_cam_velo))\n data['T_cam3_velo'] = T3.dot(R_rect_00.dot(T_cam_velo))\n\n # # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())", "def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if str(i) in self.usable_data:\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n corr=corr*n/cr_sum**2.-1.\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr", "def calibrate_main(self):\n kinect_points = []\n baxter_points = []\n \n # List of valid Poses that will be run through during calibration\n pose_list = [[0.967549571713 , -0.243194570899 , -0.0147116101372 , 0.189819515383 , 0.576035463292 , 0.565770228928 , 0.558619499005],\n [1.00591841119 , 0.0653722802741 , 0.0390316996546 , 0.152757801693 , 0.59959204309 , 0.54160654591 , 0.569049018374],\n [0.660820491796 , -0.177701888979 , 0.113357076542 , 0.173421623611 , -0.677841472218 , -0.63695170379 , -0.323648584184],\n [0.905797337361 , -0.331304537267 , 0.0462269328641 , 0.0493525714893 , -0.721181615556 , -0.56377896248 , -0.39951806284],\n [0.772376384097 , 0.0978054138748 , -0.026135473969 , 0.201319020728 , -0.601787830146 , -0.664505383498 , -0.394657642373],\n [0.811704350811 , -0.103656896994 , 0.141110073754 , 0.143214065634 , -0.594841017502 , -0.739429664627 , -0.280887284104],\n [0.755979226564 , -0.117592203468 , 0.150116490599 , 0.268454143496 , -0.711205271974 , -0.541262411561 , -0.359380628007],\n [0.979268205741 , 0.159483927641 , 0.169687141748 , 0.085900093927 , -0.69086093398 , -0.497660362668 , -0.517364965177],\n [0.696525868986 , 0.0386367320766 , 0.102296723968 , 0.174107553829 , -0.596925269821 , -0.690000302377 , -0.3704947566],\n [0.814383547029 , -0.114150948284 , 0.117337537566 , 0.256909164836 , -0.730541492608 , 
-0.479022752633 , -0.41333280908],\n [1.07873925387 , -0.0206345594802 , 0.228813657798 , 0.241138278494 , 0.520589892643 , 0.601439839116 , 0.555975371978],\n [0.809596487214 , -0.186215963667 , 0.0406577192334 , 0.136382316321 , 0.572872574265 , 0.617914774945 , 0.520958931543],\n [0.583496017246 , 0.0913085281482 , 0.0735311335765 , 0.178854233934 , -0.634977291594 , -0.660956918962 , -0.357702325182],\n [0.753218240633 , -0.0994920476031 , -0.0329215926621 , 0.0724749338894 , -0.64398059595 , -0.644760658705 , -0.405364119013],\n [0.990711394828 , -0.286979140017 , 0.0288606526987 , 0.186902642758 , 0.567888846611 , 0.639916747179 , 0.482779677187],\n [0.971247139794 , 0.238743820558 , 0.242769320113 , 0.159909170901 , 0.384929043639 , 0.660235113347 , 0.624778587604],\n [0.726572230958 , -0.219190090899 , 0.19089983206 , 0.160700064095 , -0.596639840593 , -0.739102334662 , -0.268186742615],\n [0.640192523703 , -0.107968230285 , 0.230995078633 , 0.225368689666 , -0.61504059185 , -0.682986068997 , -0.323208993765],\n [0.885666308204 , 0.179141109904 , 0.167234926829 , 0.0348897624627 , -0.519551753777 , -0.714712882343 , -0.466941297633],\n [0.741446231388 , -0.0818345763105 , 0.0555464394075 , 0.190399201766 , -0.632430315906 , -0.71040075171 , -0.243127150807],\n [1.01323608274 , -0.133186233845 , -0.021826983584 , 0.138802425528 , 0.655369312371 , 0.582661815133 , 0.460141456788],\n [1.03260814353 , 0.0346221999128 , 0.132681545664 , 0.152929969979 , 0.527931656776 , 0.665691023969 , 0.504733643284],\n [0.784673172817 , 0.230747438213 , 0.0473197447253 , 0.168223152068 , -0.503451364408 , -0.806335531932 , -0.260884466245],\n [0.780593767564 , -0.17977555903 , 0.0408083386682 , 0.165445612739 , -0.603657068794 , -0.743499201958 , -0.235446021859],\n [0.958648177818 , -0.363982170786 , 0.223764130815 , 0.0889160271122 , 0.693497085579 , 0.633182445796 , 0.332017654278],\n [0.892436390819 , 0.102284104689 , 0.0413739748571 , 0.031729435966 , 0.669928169245 , 0.65825398922 , 0.341893516497],\n [0.870188202725 , -0.0219473395379 , 0.0942454996925 , 0.195686453524 , -0.535399360374 , -0.708099812793 , -0.416712121182],\n [0.721524918703 , -0.0685114050026 , 0.107947295647 , 0.215929700552 , -0.598127059979 , -0.729024735834 , -0.253261365159],\n [0.754605545006 , -0.270272463086 , 0.213347148072 , 0.184106655957 , -0.582021981644 , -0.747339452982 , -0.262371671741],\n [0.921488386934 , -0.146508175922 , -0.0681952879441 , 0.229020339028 , 0.641492396891 , 0.617573303821 , 0.393243440441]] \n \n \n \n points_detected = False\n \n point = 0\n \n print \"\\n\\nStaring Calibration:\"\n \n # Loop through list of poses and move to each\n for pose in pose_list:\n\n self.move_to_calibration_pose(pose)\n # Wait for rgb to catch up\n time.sleep(2)\n print \"Number of points collected: \", len(kinect_points)\n while not points_detected:\n Bx, By, Bz, Ww, Wx, Wy, Wz = self.return_current_pose(\"right\")\n if (self.rgb_img is not None):\n try:\n points_detected, Kx, Ky, Kz = self.get_marker()\n except:\n points_detected = False\n print \"Exception\"\n if points_detected:\n #print Bx, ',', By, ',', Bz, ',', Ww, ',', Wx, ',', Wy, ',', Wz\n baxter_points.append([Bx, By, Bz])\n kinect_points.append([Kx, Ky, Kz])\n print \"Kinect: \" + str(kinect_points[point])\n print \"Baxter: \" + str(baxter_points[point])\n point += 1\n time.sleep(2)\n points_detected = False\n \n kinect_points = np.asmatrix(kinect_points)\n baxter_points = np.asmatrix(baxter_points)\n \n time.sleep(1)\n \n self.R, self.t = 
self.calculate_transform(kinect_points, baxter_points)\n \n print \"\\nTesting Result: \\n\"\n print \"Original Baxter point: \\n\" + str(baxter_points[0])\n cal_bax_point = self.calculate_translated_point(kinect_points[0])\n print \"\\n\\nCalculated Baxter point: \\n\" + str(cal_bax_point)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
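A minimal usage sketch for the Calibrated method above (the attribute names come from the snippet itself; the numbers and the stand-in class are invented): with a calibration factor k of 0.5 energy units per channel, peaks found at channels 100 and 200 calibrate to 50.0 and 100.0.

import numpy as np

class FakeSpectrum:
    # stand-in carrying just the attributes Calibrated() touches
    PeakValues = [100, 200]
    k = 0.5

assert np.allclose(Calibrated(FakeSpectrum()), [50.0, 100.0])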
Delete the vrrp_port (instance port) in case nova didn't. This can happen if a failover has occurred.
def deallocate_vip(self, vip):
    for amphora in six.moves.filter(self._filter_amphora,
                                    vip.load_balancer.amphorae):
        try:
            self.neutron_client.delete_port(amphora.vrrp_port_id)
        except (neutron_client_exceptions.NotFound,
                neutron_client_exceptions.PortNotFoundClient):
            LOG.debug('VIP instance port %s already deleted. Skipping.',
                      amphora.vrrp_port_id)

    try:
        port = self.get_port(vip.port_id)
    except base.PortNotFound:
        msg = ("Can't deallocate VIP because the vip port {0} cannot be "
               "found in neutron".format(vip.port_id))
        raise base.VIPConfigurationNotFound(msg)

    self._delete_security_group(vip, port)

    if port.device_owner == OCTAVIA_OWNER:
        try:
            self.neutron_client.delete_port(vip.port_id)
        except Exception:
            message = _('Error deleting VIP port_id {port_id} from '
                        'neutron').format(port_id=vip.port_id)
            LOG.exception(message)
            raise base.DeallocateVIPException(message)
    else:
        LOG.info("Port %s will not be deleted by Octavia as it was "
                 "not created by Octavia.", vip.port_id)
[ "def deallocate_vip(self, vip):\n try:\n for amphora in vip.load_balancer.amphorae:\n try:\n self.network_proxy.delete_port(amphora.vrrp_port_id)\n except os_exceptions.ResourceNotFound:\n LOG.debug(\n 'VIP instance port %s already deleted. Skipping.',\n amphora.vrrp_port_id)\n except AttributeError as ex:\n LOG.warning(f\"Cannot delete port from amphorae. Object does not \"\n f\"exist ({ex!r})\")\n\n try:\n port = self.get_port(vip.port_id)\n except base.PortNotFound:\n LOG.warning(\"Can't deallocate VIP because the vip port %s \"\n \"cannot be found in neutron. \"\n \"Continuing cleanup.\", vip.port_id)\n port = None\n\n self._delete_security_group(vip, port)\n\n if port and port.device_owner == OCTAVIA_OWNER:\n try:\n self.network_proxy.delete_port(vip.port_id)\n except os_exceptions.ResourceNotFound:\n LOG.debug('VIP port %s already deleted. Skipping.',\n vip.port_id)\n except Exception as e:\n message = _('Error deleting VIP port_id {port_id} from '\n 'neutron').format(port_id=vip.port_id)\n LOG.exception(message)\n raise base.DeallocateVIPException(message) from e\n elif port:\n LOG.info(\"Port %s will not be deleted by Octavia as it was \"\n \"not created by Octavia.\", vip.port_id)", "def delete(self):\n for port in self.ports:\n port.delete()\n self.ports = []\n self.subnet.close()", "def del_instance(ip,port):\n err_code=ERR_CODE_DEFAULT\n if not (ip or port):\n return ERR_CODE_INVALID\n try:\n result,msg=instance_manage.InstanceManage({\"instance_ip\":ip,\"instance_port\":port}).stat_instance(True)\n if result:\n err_code=ERR_CODE_SUCCESS\n log.info(\"Instance has been deleted:%s:%s\",ip,port)\n else:\n err_code=ERR_CODE_UNKOWN\n log.error(\"Instance deleted failed:%s:%s %s\",ip,port,msg)\n except Exception as ex:\n err_code=ERR_CODE_UNKOWN\n log.error(ex)\n return err_code", "def test_model_delete_port_rollback(self):\n with self._create_port_res() as res:\n\n # After port is created, we should have one binding for this\n # vlan/nexus switch.\n port = self.deserialize(self.fmt, res)\n start_rows = nexus_db_v2.get_nexusvlan_binding(self.vlan_start,\n self.switch_ip)\n self.assertEqual(len(start_rows), 1)\n\n # Inject an exception in the OVS plugin delete_port\n # processing, and attempt a port deletion.\n inserted_exc = q_exc.Conflict\n expected_http = base.FAULT_MAP[inserted_exc].code\n with mock.patch.object(l3_db.L3_NAT_db_mixin,\n 'disassociate_floatingips',\n side_effect=inserted_exc):\n self._delete('ports', port['port']['id'],\n expected_code=expected_http)\n\n # Confirm that the Cisco model plugin has restored\n # the nexus configuration for this port after deletion failure.\n end_rows = nexus_db_v2.get_nexusvlan_binding(self.vlan_start,\n self.switch_ip)\n self.assertEqual(start_rows, end_rows)", "def test_delete_logical_router_port(self):\n lrport = self._mocked_lrport()\n\n uuid = test_constants_v3.FAKE_ROUTER_PORT['id']\n lrport.delete(uuid)\n test_client.assert_json_call(\n 'delete', lrport,\n 'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid)", "def delete_port(self, port_name):\n command = ovs_vsctl.VSCtlCommand(\n 'del-port', (self.br_name, port_name), '--if-exists')\n self.run_command([command])", "def test_Bridge_orport_del(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.orPort, 36489)\n\n del(self.bridge.orPort)\n self.assertIsNone(self.bridge.orPort)\n self.assertIsNone(self.bridge._orPort)", "def delete_port(port):\n return IMPL.delete_port(port)", "def delete_agent_gateway_port(self, context, 
**kwargs):\n network_id = kwargs.get('network_id')\n host = kwargs.get('host')\n admin_ctx = neutron_context.get_admin_context()\n self.l3plugin.delete_floatingip_agent_gateway_port(\n admin_ctx, host, network_id)", "def _port_unbound_update(self, context, port):\n LOG.info(\"Port becoming unbound: destroy.\")\n self.transport.endpoint_deleted(port)", "def _delete_current_gw_port(self, context, router_id, router,\n new_network_id, request_body=None):\n port_requires_deletion = (\n router.gw_port and router.gw_port['network_id'] != new_network_id)\n if not port_requires_deletion:\n return\n admin_ctx = context.elevated()\n old_network_id = router.gw_port['network_id']\n\n if self.router_gw_port_has_floating_ips(admin_ctx, router_id):\n raise l3_exc.RouterExternalGatewayInUseByFloatingIp(\n router_id=router_id, net_id=router.gw_port['network_id'])\n gw_ips = [x['ip_address'] for x in router.gw_port['fixed_ips']]\n gw_port_id = router.gw_port['id']\n self._delete_router_gw_port_db(context, router, request_body)\n if db_api.is_session_active(admin_ctx.session):\n # TODO(ralonsoh): ML2 plugin \"delete_port\" should be called outside\n # a DB transaction. In this case an exception is made but in order\n # to prevent future errors, this call should be moved outside\n # the current transaction.\n admin_ctx.GUARD_TRANSACTION = False\n self._core_plugin.delete_port(\n admin_ctx, gw_port_id, l3_port_check=False)\n # TODO(boden): normalize metadata\n metadata = {'network_id': old_network_id,\n 'new_network_id': new_network_id,\n 'gateway_ips': gw_ips}\n registry.publish(resources.ROUTER_GATEWAY,\n events.AFTER_DELETE, self,\n payload=events.DBEventPayload(\n context, states=(router,),\n metadata=metadata,\n resource_id=router_id))", "def delete_vpn_connection(DryRun=None, VpnConnectionId=None):\n pass", "def delete_interfaces_interface_routed_vlan_ipv4_addresses_address_vrrp_vrrp_by_id(name, ip): # noqa: E501\n return 'do some magic!'", "def test_nexus_delete_port_rollback(self):\n with self._create_port_res() as res:\n\n port = self.deserialize(self.fmt, res)\n\n # Check that there is only one binding in the nexus database\n # for this VLAN/nexus switch.\n start_rows = nexus_db_v2.get_nexusvlan_binding(self.vlan_start,\n self.switch_ip)\n self.assertEqual(len(start_rows), 1)\n\n # Simulate a Nexus switch configuration error during\n # port deletion.\n with self._patch_ncclient(\n 'manager.connect.return_value.edit_config.side_effect',\n AttributeError):\n self._delete('ports', port['port']['id'],\n base.FAULT_MAP[c_exc.NexusConfigFailed].code)\n\n # Confirm that the binding has been restored (rolled back).\n end_rows = nexus_db_v2.get_nexusvlan_binding(self.vlan_start,\n self.switch_ip)\n self.assertEqual(start_rows, end_rows)", "def test_dp_disconnect_cleanup(self):\n valve = self.valves_manager.valves[self.DP_ID]\n port_num = list(valve.dp.ports.keys())[0]\n self.port_expected_status(port_num, 1)\n self.disconnect_dp()\n self.port_expected_status(port_num, 0)", "def delete_qrouter_connection(self, vlan, link):\n\n if self.test:\n return True\n try:\n ns_qouter = '{}-qrouter'.format(str(vlan))\n qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))\n qrouter_ns_veth = '{}-vethQO'.format(str(vlan))\n qrouter_br_veth = '{}-vethBQ'.format(str(vlan))\n qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))\n\n command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # down ns veth\n command = 'sudo ip netns exec {} ip link set dev {} 
down'.format(ns_qouter, qrouter_ns_veth)\n self.run_command(command, ignore_exit_status=True)\n\n command = 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter, qrouter_ns_veth)\n self.run_command(command, ignore_exit_status=True)\n\n command = 'sudo ip netns del ' + ns_qouter\n self.run_command(command)\n\n # down ovs veth interface\n command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # down br veth interface\n command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # delete veth interface\n command = 'sudo ip link delete {} '.format(link, qrouter_ovs_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # down br veth interface\n command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # delete veth interface\n command = 'sudo ip link delete {} '.format(link, qrouter_ns_router_veth)\n self.run_command(command, ignore_exit_status=True)\n\n # down br veth interface\n command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)\n self.run_command(command)\n\n # delete NS\n return True\n except RunCommandException as e:\n self.logger.error(\"delete_qrouter_connection ssh Exception: {}\".format(str(e)))\n return False", "def delete_port(self, port_name):\n\n try:\n port_num = self.get_port_number(port_name)\n\n mask = np.arange(len(self.ports)) != port_num\n s = self.s[mask]\n self.s = s[:,mask]\n self.z0 = self.z0[mask]\n\n ports = list(self.ports)\n ports.remove(port_name)\n self.ports = tuple(ports)\n\n except:\n print(\"The \\\"{}\\\" port does not exist.\".format(port_name))", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def del_port(self, user, port):\n try:\n self.c.execute(sql['del_port'], (user, port))\n self.c.execute(sql['del_stocks'], (user, port))\n self.db.commit()\n except sqlite3.Error as e:\n self.db.rollback()\n flash(\"Can't delete port because \"+str(e))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Waits for the amphora ports device_id to be unset. This method waits for the ports on an amphora device_id parameter to be '' or None which signifies that nova has finished detaching the port from the instance.
def wait_for_port_detach(self, amphora):
    interfaces = self.get_plugged_networks(compute_id=amphora.compute_id)

    ports = []
    port_detach_timeout = CONF.networking.port_detach_timeout
    for interface_ in interfaces:
        port = self.get_port(port_id=interface_.port_id)
        ips = port.fixed_ips
        lb_network = False
        for ip in ips:
            if ip.ip_address == amphora.lb_network_ip:
                lb_network = True
        if not lb_network:
            ports.append(port)

    for port in ports:
        try:
            neutron_port = self.neutron_client.show_port(
                port.id).get('port')
            device_id = neutron_port['device_id']
            start = int(time.time())

            while device_id:
                time.sleep(CONF.networking.retry_interval)
                neutron_port = self.neutron_client.show_port(
                    port.id).get('port')
                device_id = neutron_port['device_id']

                timed_out = int(time.time()) - start >= port_detach_timeout
                if device_id and timed_out:
                    message = ('Port %s failed to detach (device_id %s) '
                               'within the required time (%s s).' %
                               (port.id, device_id, port_detach_timeout))
                    raise base.TimeoutException(message)
        except (neutron_client_exceptions.NotFound,
                neutron_client_exceptions.PortNotFoundClient):
            pass
[ "def detach_and_delete_ports(connection, node, created_ports, attached_ports):\n for port_id in set(attached_ports + created_ports):\n LOG.debug('Detaching port %(port)s from node %(node)s',\n {'port': port_id, 'node': _utils.log_res(node)})\n try:\n connection.baremetal.detach_vif_from_node(node, port_id)\n except Exception as exc:\n LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '\n 'assuming already removed: %(exc)s',\n {'vif': port_id, 'node': _utils.log_res(node),\n 'exc': exc})\n\n for port_id in created_ports:\n LOG.debug('Deleting port %s', port_id)\n try:\n connection.network.delete_port(port_id, ignore_missing=False)\n except Exception as exc:\n LOG.warning('Failed to delete neutron port %(port)s: %(exc)s',\n {'port': port_id, 'exc': exc})\n else:\n LOG.info('Deleted port %(port)s for node %(node)s',\n {'port': port_id, 'node': _utils.log_res(node)})", "def detach_port(self, instance_obj, network_obj):\n raise NotImplementedError()", "def test_dp_disconnect_cleanup(self):\n valve = self.valves_manager.valves[self.DP_ID]\n port_num = list(valve.dp.ports.keys())[0]\n self.port_expected_status(port_num, 1)\n self.disconnect_dp()\n self.port_expected_status(port_num, 0)", "def detach_usb_device(self, id_p, done):\n if not isinstance(id_p, basestring):\n raise TypeError(\"id_p can only be an instance of type basestring\")\n if not isinstance(done, bool):\n raise TypeError(\"done can only be an instance of type bool\")\n self._call(\"detachUSBDevice\",\n in_p=[id_p, done])", "def detach_and_delete_ports(self):\n detach_and_delete_ports(self._connection, self._node,\n self.created_ports, self.attached_ports)", "def detachDeviceLink(self, card, pciaddr, type):\n self.currentCard = card\n result = self.readDeviceLink(pciaddr, type)\n if result:\n addr = self.pci2virsh(pciaddr)\n command = [\"virsh\", \"nodedev-detach\", \"%s\" % addr] \n out, err = self.runSubprocess(command)\n if out.find (\"Device %s detached\" % addr) > -1 :\n print \"Detached GPU card '%s' %s device %s\" % (self.currentCard, type, pciaddr)\n return 0\n if err:\n print err\n return 1\n else:\n print \"GPU card '%s' %s device %s is already detached\" % (self.currentCard, type, pciaddr)\n return 0", "def _port_unbound_update(self, context, port):\n LOG.info(\"Port becoming unbound: destroy.\")\n self.transport.endpoint_deleted(port)", "def unconfigure_tenant_networks(self, task):\n for port in task.ports:\n extra_dict = port.extra\n extra_dict.pop('vif_port_id', None)\n port.extra = extra_dict\n port.save()", "def disconnect(self, device):", "def detachGPU(self):\n cards = self.requestedCards()\n for c in cards:\n if len(self.cards[c]) == 2:\n pciV, pciA = self.cards[c]\n self.detachDeviceLink(c, pciV, \"video\")\n self.detachDeviceLink(c, pciA, \"audio\")\n if len(self.cards[c]) == 1:\n pciV = self.cards[c][0]\n self.detachDeviceLink(c, pciV, \"video\")", "def block_until_cjdroute_port_is_free():\n log.info(\"Waiting until the cjdroute port becomes available\")\n wait(\n check_if_port_available_factory(conf().CJDNS_DEFAULT_PORT),\n timeout=WAIT_FOR_CJDROUTE_PORT_TIMEOUT\n )", "def deprovision(self):\n self.DockerManager.releasePort(self.ssh_port)\n self.DockerManager.removeContainer(self.id)\n self.users = dict()\n self.ssh_port=22", "def _WaitUntilStarted(self):\n if not self.port:\n self._ReadPortNumber()\n\n try:\n timeout_util.WaitForReturnTrue(self.IsReady,\n timeout=self.DEV_SERVER_TIMEOUT,\n period=5)\n except timeout_util.TimeoutError:\n self.terminate()\n raise DevServerStartupError('Devserver 
did not start')", "def test_Bridge_orport_del(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.orPort, 36489)\n\n del(self.bridge.orPort)\n self.assertIsNone(self.bridge.orPort)\n self.assertIsNone(self.bridge._orPort)", "def detach_from(self, host=None):\n raise NotImplementedError()", "def _detach_virtual_cd(self, task):\n driver_info = _parse_driver_info(task.node)\n hw = _get_hw_library(driver_info)\n hw.detach_virtual_cd(driver_info, task)", "def ex_node_detach_interface(self, node, iface):\r\n op = self.connection.request('hosting.vm.iface_detach',\r\n int(node.id), int(iface.id))\r\n if self._wait_operation(op.object['id']):\r\n return True\r\n return False", "def disconnect(self, port=CONFIG.SWITCH.ACRONAME_PORT, verbose=True, *args, **kwargs):\n if verbose:\n self.logger.info('Disconnecting USB{} port...'.format(port))\n out = self.switch.setPortDisable(port)\n if verbose:\n self.logger.done()\n self.switchlogger.info('%s disable port [%d]: %s' % (ACRONAME_TAG, port, out))", "def bt_stop_discovery(bt):\r\n if bt:\r\n bt.stop_discovery()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> stair(0) 0 >>> stair(1) 1 >>> stair(2) 2 >>> stair(3) 3 >>> stair(4) 5
def stair(stair_n):
    def stair_s(stair_num):
        if stair_num in range(0, 2):
            return stair_num
        else:
            return stair_s(stair_num-1) + stair_s(stair_num-2)
    if stair_n == 0:
        return 0
    else:
        return stair_s(stair_n+1)
[ "def stairs(n):\n ### Your code here ###\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return stairs(n-1) + stairs(n-2)", "def stairs(n):\n if n <= 2:\n return n\n if n == 3:\n return 4\n return stairs(n-1) + stairs(n-2) + stairs(n-3)", "def init_snake():\n\n head = Square(5, 4)\n tail = Square(6, 4, ahead=head)\n\n return head, tail", "def climbStairs(self, n: int) -> int:\n return 1 if n == 1 else 2 if n == 2 else self.dynamicProgramming(n)", "def stairs(length):\n if length <= 3: # base case, doesn't draw anything\n return 0\n else:\n \n t.fillcolor(0,length/(length + 15),1) #15 chosen as it looks good\n \n # Creativity: this changes the color depending on position, making different\n # shades of blue and red, where each varies with x and y coordinate (remove hashtag)\n # t.fillcolor((t.pos()[0])/350,0,(t.pos()[1])/350)\n t.begin_fill()\n for i in range(4):\n t.forward(length)\n t.left(90)\n t.end_fill()\n count = 1\n \n t.forward(length)\n count += stairs(length/2) # adds to count and uses recursion to draw stairs\n t.backward(length)\n t.left(90)\n t.forward(length)\n t.right(90)\n count += stairs(length/2)\n t.left(90)\n t.backward(length)\n t.right(90)\n \n return count", "def spare_strollers(x):\n return spare_group(x, [\"Infancy\"])", "def get_sandwiches(*items):\n print(\"\\nSandwiches ordered: \")\n for value in items:\n print(\"-\" + value)", "def take(self, coli):\n cnt = self.stones.get(coli, 0)\n if cnt > 0:\n self.stones[coli] = cnt - 1\n else:\n raise SelfDestructionException(i18n.i18n('Cannot take stones'))", "def rest(self):\n if self.stamina + 2 > 3:\n self.stamina = 3\n else:\n self.stamina += 2", "def generate_stairs(self):\n entrance_room, exit_room = misc.random_choice(self.rooms, size=2)\n while entrance_room is exit_room:\n exit_room = misc.random_choice(self.rooms)\n\n entrance_y = random.randrange(entrance_room.y,\n entrance_room.y + entrance_room.h)\n entrance_x = random.randrange(entrance_room.x,\n entrance_room.x + entrance_room.w)\n\n entrance = Stairs(entrance_y, entrance_x, -1)\n\n exit_y = random.randrange(exit_room.y, exit_room.y + exit_room.h)\n exit_x = random.randrange(exit_room.x, exit_room.x + exit_room.w)\n\n exit_ = Stairs(exit_y, exit_x, 1)\n return entrance, exit_", "def hailstone(n):\n if n == 1:\n print(1)\n return 1\n elif n % 2 == 0:\n print(n)\n return hailstone(n // 2) + 1\n else:\n print(n)\n return hailstone(n * 3 + 1) + 1", "def make_sinc(t, i, y):\n sinc = boxcar.make_wave()\n sinc.shift(t)\n sinc.roll(i)\n sinc.scale(y * factor)\n return sinc", "def hailstone(n):\n print(n)\n if n == 1:\n return 1\n elif n % 2 == 0:\n return 1 + hailstone(n // 2)\n else:\n return 1 + hailstone((n * 3) + 1)", "def make_sandwich(*items): \n print(\"\\nI'll make you a great sandwich: \")\n for item in items:\n print(\" ...adding \" + item + \" to your sandwich.\")\n print(\"Your sandwich is ready!\")", "def vending_machine(snacks):\n idx = 0\n def vender():\n nonlocal idx\n snack = snacks[idx]\n idx = (idx + 1) % len(snacks)\n return snack\n return vender", "def hailstone(x):\n \"*** YOUR CODE HERE ***\"\n step = 1 \n while(x != 1):\n print(x)\n if(x % 2 == 0):\n x = x//2\n else:\n x = x*3+1\n step = step+1\n return step", "def stairway_path(stairway: Sequence[Union[float, int]]) -> Union[float, int]:\n\n def lazy_way(stairway):\n def get_cost(n):\n if n == 0 or n == 1:\n return stairway[n]\n return stairway[n] + min(get_cost(n - 1), get_cost(n - 2))\n\n return get_cost(len(stairway) - 1)\n\n def 
direct_way(stairway):\n last = len(stairway)\n nodeCost = [0] * last\n nodeCost[0] = stairway[0]\n nodeCost[1] = stairway[1] # +stairway[1]\n for i in range(2, last):\n nodeCost[i] = stairway[i] + min(nodeCost[i - 1], nodeCost[i - 2])\n print(i, stairway[i], nodeCost[i], min(stairway[i - 1], stairway[i - 2]))\n print('*' * 20)\n return nodeCost[-1]\n\n return direct_way(stairway)\n\n # pos = 0\n # last = len(stairway)\n # minCost = stairway[0]\n # pos = 0\n # last = len(stairway)\n # while pos < last - 1:\n # nextOne = stairway[pos + 1]\n # if pos == last - 2:\n # pos += 1\n # minCost += nextOne\n # continue\n # afterNext = stairway[pos + 2]\n # if nextOne >= afterNext:\n # pos += 2\n # minCost += afterNext\n # else:\n # pos += 1\n # minCost += nextOne\n # print(pos, stairway[pos], minCost)\n # if pos < (last - 1):\n # minCost += stairway[last - 1]\n # print(minCost)\n # print('*' * 20)\n # return minCost\n\n # pos = len(stairway) - 1\n # minCost = stairway[pos]\n # while pos > 1:\n # nextOne = stairway[pos - 1]\n # afterNext = stairway[pos - 2]\n # if (nextOne >= afterNext) or (pos == 2):\n # pos -= 2\n # minCost += afterNext\n # else:\n # pos -= 1\n # minCost += nextOne\n # print(pos, stairway[pos], minCost)\n # if pos == 1:\n # minCost += stairway[0]\n # print(minCost)\n # print('*' * 20)\n # return minCost", "def createSnake(x: int, y:int) -> Snake:\n return Snake(x, y)", "def flower(pen, n, size):\n for i in range(n):\n petal(pen, size)\n pen.right(360/n)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert object columns to categorical integers.
def obj_as_cat_int(df, ignore=[]):
    obj_cols = df.select_dtypes(include='object').columns
    for col in obj_cols:
        if col not in ignore:
            df[col] = df[col].astype('category')
            df[col] = df[col].cat.codes.astype("int16")
            df[col] -= df[col].min()
    return df
[ "def int_categorize(df):\n if \"Dx?\" in df.columns:\n df[\"Dx?\"] = df[\"Dx?\"].fillna(False).astype(bool)\n up = []\n for c in list(df.columns):\n if(str(df[c].dtype) == \"object\"):\n up.append(c)\n dicts = [dict() for u in up]\n df = update_encoding(df, dicts, up, 'category')\n for u in up:\n df = update_encoding(\n df,\n {m: i for i, m in enumerate(list(df[u].cat.categories))},\n u,\n int)\n return(df)", "def _convert_categorical_features_to_numeric(self,\n df):\n is_categorical_feature = [False for _ in df]\n\n for i, column in enumerate(df):\n if schema_util.is_categorical_feature(\n schema_util.get_feature(self._schema, column)):\n # Encode categorical columns\n df[column] = np.unique(df[column].values, return_inverse=True)[1]\n is_categorical_feature[i] = True\n return is_categorical_feature", "def to_categorical(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()", "def __convert_categorical_values(df,\n ordinal_categorical_fields_mapping,\n nominal_categorical_fields=None\n ):\n\n \"\"\"\n addr_state_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['addr_state']))\n }\n\n zip_code_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['zip_code']))\n }\n\n purpose_cat_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['purpose_cat']))\n }\n \"\"\"\n\n # Convert ordinal categorical values to the numerical values\n if ordinal_categorical_fields_mapping is not None:\n df.replace(ordinal_categorical_fields_mapping, inplace=True)\n\n # df.replace(addr_state_mapping, inplace=True)\n # df.replace(zip_code_mapping, inplace=True)\n # df.replace(purpose_cat_mapping, inplace=True)\n\n # Convert nominal categorical values to the one-hot encoded fields\n for field_name in nominal_categorical_fields:\n dummies = pd.get_dummies(df[field_name]).rename(columns=lambda x: 'is_' + field_name + '_' + str(x))\n df = pd.concat([df, dummies], axis=1)\n df = df.drop([field_name], axis=1)\n\n return df", "def map_categorical_data(spark_df, input_column, output_column):\n\n stringIndexer = StringIndexer(\n inputCol=input_column,\n outputCol=output_column)\n mapped_df = stringIndexer.fit(spark_df).transform(spark_df)\n\n return cast_column(mapped_df, output_column, \"integer\")", "def cat2numeric(col:pd.Series)->None:\n le = preprocessing.LabelEncoder()\n num_values = le.fit_transform(col.values)\n col.replace(col.values,num_values, inplace=True)", "def cols_int_is_categorical(df, int_cols, threshold = 5):\r\n threshold = min(threshold, df.shape[0]/10)\r\n df_count = df[int_cols].apply(lambda x: len(set(x)))\r\n return list(df_count[df_count < threshold].index)", "def to_class(categorical_obj):\n categorical_list = list(categorical_obj)\n class_obj = []\n\n for yy in categorical_list:\n class_obj += [np.where(yy==max(yy))[0][0]]\n\n return class_obj", "def categorical_to_numeric(data, schema):\n attr_types = map(lambda attr: attr.tup[1], schema)\n nominal_indices = [i for i in range(len(attr_types)) if attr_types[i] is \"NOMINAL\"]\n for index in nominal_indices:\n values, i = {}, 0\n # Map each value of the attribute to a number from 0 to k\n for value in schema[index].tup[2]: # For each of the possible values of the attribute\n if value not in values:\n values[value] = i\n i += 1\n for example in data:\n example[index] = values[example[index]]\n return data", "def _ensure_categorical(arr):\n\n if not is_categorical(arr):\n from pandas import Categorical\n arr = Categorical(arr)\n return arr", "def 
convert_cat_codes(df,new_column, column):\n df[column]= df[column].astype('category')\n df[new_column] = df[column].cat.codes\n #df.column.m_interactions.corr(contacts.contact_channel_first)", "def to_one_hot(category_id, num_labels=80):\n index = coco_categories.index(category_id)\n return [0 if i != index else 1 for i in range(num_labels)]", "def dummization(self):\n #TODO: use sklearn ColumnTransformer instead\n\n return pd.get_dummies(\n self.simple_imputer(),\n prefix_sep='_',\n prefix=self.categorical_cols,\n columns=self.categorical_cols,\n drop_first=False\n )", "def insert_category_dtype(df):\n for col in df:\n if not pd.api.types.is_numeric_dtype(df[col].dtype):\n df[col] = df[col].astype('category')\n else:\n pass\n return df", "def __get_cat_levels(self,data):\n levels = {}\n\n for v in self.categorical:\n ds = data[v].astype('category')\n levels[v] = ds[ds.notnull()].unique().categories.sort_values()\n\n return levels", "def transform_categorical(df):\n categoricals = []\n\n for feature in ['gender', 'race', 'postal_code', 'occupation']:\n categoricals.append(pd.get_dummies(df[feature], prefix=feature))\n del(df[feature])\n\n df = pd.concat([df] + categoricals, axis=1)\n return df", "def one_hot_encoding(X):\n X_cat = pd.get_dummies(X.select_dtypes(include=['object']))\n X_num = X.select_dtypes(exclude=['object'])\n res = pd.concat([X_num, X_cat], axis=1, sort=False)\n \n return res", "def predict_categories(model: \"Model\", X: pd.DataFrame) -> np.ndarray:\n proba_df, cats_df = model.predict(X)\n return cats_df.category.to_numpy(dtype=\"int32\")", "def cat_to_num_teams(teams):\n cat_col = [\"team\"]\n dummies = pd.get_dummies(teams[cat_col])\n teams = teams.drop(cat_col, axis = 1)\n teams = pd.concat([teams, dummies], axis = 1)\n return teams" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert sales from wide to long format, and merge sales with calendar and prices to create one dataframe.
def melt_and_merge(calendar, prices, sales, submission=False):
    id_cols = ['id', 'item_id', 'dept_id', 'store_id', 'cat_id', 'state_id']

    if submission:
        last_day = int(sales.columns[-1].replace('d_', ''))
        sales.drop(sales.columns[6:-MAX_LAG], axis=1, inplace=True)
        for day in range(last_day + 1, last_day + 28 + 1):
            sales[f'd_{day}'] = np.nan

    df = pd.melt(sales, id_vars=id_cols, var_name='d', value_name='sales')
    df = df.merge(calendar, on='d', copy=False)
    df = df.merge(prices, on=['store_id', 'item_id', 'wm_yr_wk'], copy=False)

    return df
[ "def index_sales(sale_count):\r\n data['index'] = list(range(sale_count))\r\n \r\n date = 0 \r\n price = 1\r\n \r\n for i in data['index']:\r\n sales['sale_' + str(i)] = [data['sales'][date], data['sales'][price]]\r\n date += 2\r\n price += 2", "def transform(self, X, y=None):\n assert type(X[0]) == DataFrame, \"X must be of type pandas.DataFrame\"\n\n data = X.copy()\n sales = data[0]\n\n # looking at different ways to get rid of \"no a number\"-Values\n # dropna subset, drops every row where subset-Value is NaN\n # dropna(how='all'), drops row if all values are NaN\n # dropna(thresh = m), drops row if at least m Values are NaN\n # nonan = no not a number (values)\n sales = sales.rename(columns={'day of week': 'dayofweek'})\n sales = sales.dropna(subset=['total'])\n sales = sales.dropna(subset=['dayofweek'])\n # sales_nonan2 = sales.dropna(how='all')\n # sales_nonan3 = sales.dropna(thresh=5)\n\n # Split first column in date and time and add as feature at the end\n dateandtime = sales[\"datetime\"]\n sales[[\"date\", \"time\"]] = dateandtime.str.split(pat=' ', n=1, expand=True)\n\n # OneHotEncoding of the weekdays\n encoder = OneHotEncoder(sparse=False, categories=[['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']])\n dayofweek_encoded = encoder.fit_transform(sales[[\"dayofweek\"]])\n\n dayofweek_encoded_pd = DataFrame(dayofweek_encoded, columns=['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun'])\n sales = pd.concat([sales, dayofweek_encoded_pd], axis=1)\n\n # columns=encoder.get_feature_names_out()\n\n # add weekday column starting from 0 = Monday to 6 = Sunday\n sales['weekday'] = pd.to_datetime(sales['date']).apply(lambda x: x.weekday())\n # sales = sales.rename(columns={'day of week': 'dayofweek'})\n\n # drop datetime and place, only date is important not datetime\n sales_inprogress = sales.drop(['datetime', 'place'], axis=1)\n\n # convert time in seconds, so its easier to calculate with it\n # and set specific times\n\n time_col = sales['time']\n # time.strptime('00:00:00,000','%I:%M:%S')\n\n # set daytime borders\n sales_inprogress['daytime'] = time_col\n\n for i in range(len(time_col)):\n if 39600 >= time_in_secs(time_col[i]) >= 25200:\n sales_inprogress['daytime'][i] = 0\n elif 54000 >= time_in_secs(time_col[i]) > 39600:\n sales_inprogress['daytime'][i] = 1\n elif 64800 >= time_in_secs(time_col[i]) > 54000:\n sales_inprogress['daytime'][i] = 2\n else:\n sales_inprogress['daytime'][i] = 3\n\n # looking if there are rows with morning sales or night sales\n important_cols = sales_inprogress[['weekday', 'daytime']].copy()\n\n zero_daytime = []\n for i in range(len(important_cols['daytime'])):\n if important_cols['daytime'][i] == 0:\n zero_daytime.append(i)\n\n for i in zero_daytime:\n sales_inprogress = sales_inprogress.drop(zero_daytime)\n\n # drop unnecessary columns\n sales_bproducts = sales_inprogress.drop(['dayofweek', 'time', 'total'], axis=1) # 'daytime' 'weekday'\n main_bakery = sales_bproducts.groupby(\n ['date', 'daytime', 'weekday', 'Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']).sum().reset_index()\n\n return data, main_bakery", "def get_tables(dictionary):\n \n sales = dictionary['rp_sale']\n parcels = dictionary['res_bldg']\n residences = dictionary['parcel']\n\n sales = sales[sales['DocumentDate'].astype(str).str.endswith('2019')]\n sales = sales[(sales['SalePrice'] > 200000) & (sales['SalePrice'] < 1500000)]\n combo = sales.merge(residences, on = ['Major','Minor'])\n combo = combo.merge(parcels, on = ['Major','Minor'])\n combo = combo[combo['BldgGrade'] > 1]\n 
combo = combo[(combo['PresentUse'] == 2)\n | (combo['PresentUse'] == 29)\n | (combo['PresentUse'] == 300)\n | (combo['PresentUse'] == 6)]\n combo = combo[combo['NbrLivingUnits'] != 10]\n\n ordinalcols = ['SalePrice','BrickStone','NbrLivingUnits',\n 'Stories','BldgGrade','SqFt1stFloor','SqFtUpperFloor','SqFtUnfinFull',\n 'SqFtUnfinHalf','SqFtTotLiving','SqFtTotBasement','SqFtFinBasement','SqFtGarageBasement',\n 'FinBasementGrade','SqFtGarageAttached','SqFtOpenPorch','SqFtEnclosedPorch',\n 'SqFtDeck','Bedrooms','BathHalfCount','Bath3qtrCount','BathFullCount','FpSingleStory',\n 'FpMultiStory','FpFreestanding','FpAdditional','YrBuilt','YrRenovated','Condition',\n 'AddnlCost','SqFtLot','MtRainier','Olympics','Cascades','Territorial','SeattleSkyline',\n 'PugetSound','LakeWashington','LakeSammamish','SmallLakeRiverCreek','OtherView',\n 'WfntFootage','LotDepthFactor','TrafficNoise', 'Address']\n\n categorycols = ['SaleReason', 'PropertyClass','HeatSystem','HeatSource','PresentUse','HBUAsIfVacant',\n 'HBUAsImproved','WaterSystem','SewerSystem','Access','InadequateParking','StreetSurface',\n 'Topography','WfntLocation','WfntBank','WfntPoorQuality','WfntRestrictedAccess',\n 'WfntAccessRights','WfntProximityInfluence','TidelandShoreland','PowerLines',\n 'OtherNuisances','AdjacentGolfFairway','AdjacentGreenbelt'] \n\n ordinaltable = combo[ordinalcols]\n categorytable = combo[categorycols]\n\n return (ordinaltable, categorytable)", "def events_wide_to_long(self, events):\n # Make a copy of the dataframe #\n df = events.copy()\n # We want to pivot on all columns except two #\n skip = ['step', 'amount']\n cols = [col for col in events_cols if col not in skip]\n # Reshape from wide to long format #\n df = pandas.wide_to_long(df,\n stubnames = \"amount\",\n i = cols,\n j = \"year\",\n sep = '_')\n # Drop rows that don't have an amount #\n df = df.dropna()\n # Reset index #\n df = df.reset_index()\n # Convert years to time steps #\n df['step'] = self.country.year_to_timestep(df['year'])\n # Drop the year column #\n df = df.drop(columns=['year'])\n # Reorder columns according to the correct input order #\n df = df[events_cols]\n # Return #\n return df", "def get_sales(self):\n return torch.from_numpy(self.sales_df.iloc[:, 5:].values).type(torch.get_default_dtype())", "def extractSalesTrx(filename):\r\n sales_cols = {'StoreNum':np.int64,\r\n 'Register':np.int64,\r\n 'TransNum':np.int64,\r\n 'TransDatetime(GMT)':'datetime64[ns]',\r\n 'TransDatetime(Local)':str,\r\n 'BusDate':'datetime64[ns]',\r\n 'UPC':str,\r\n 'ItemID':np.int64,\r\n 'DeptNum':np.int64,\r\n 'ItemQuantity':np.float64,\r\n 'WeightAmt':np.float64,\r\n 'SalesAmt':np.float64,\r\n 'CostAmt':np.float64,\r\n 'CashierNum':np.int64,\r\n 'PriceType':str,\r\n 'ServiceType':str,\r\n 'TenderType':str,\r\n 'LoyaltyCardNumber':np.int64}\r\n salesTrx = pd.read_csv(filename,\r\n sep='|',\r\n header=None,\r\n converters={6: str},\r\n parse_dates= [[3,4],5])\r\n salesTrx=pd.concat([salesTrx.iloc[:,1:4],salesTrx.iloc[:,0],salesTrx.iloc[:,0].astype(str),salesTrx.iloc[:,4:]],axis=1)\r\n salesTrx.columns=sales_cols.keys()\r\n salesTrx.fillna({'UPC':-999,\r\n 'LoyaltyCardNumber':-999},inplace=True)\r\n salesTrx=salesTrx.astype(sales_cols)\r\n salesTrx['BusDate'] = salesTrx['BusDate']+timedelta(hours=12)\r\n salesTrx.name = 'SalesTrx'\r\n print(salesTrx.dtypes)\r\n return(salesTrx)", "def all_sales():\n return [\n {\n \"sale_id\": 1,\n \"product\": \"Samsung Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 4500000\n },\n {\n \"sale_id\": 2,\n \"product\": \"Toshiba 
Flatscreen Tv\",\n \"quantity\": 6,\n \"price\": 9000000\n },\n {\n \"sale_id\": 3,\n \"product\": \"LG Flatscreen Tv\",\n \"quantity\": 12,\n \"price\": 1500000\n },\n {\n \"sale_id\": 4,\n \"product\": \"Sony Flatscreen Tv\",\n \"quantity\": 1,\n \"price\": 500000\n },\n {\n \"sale_id\": 5,\n \"product\": \"Hisense Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 800000\n },\n ]", "def flatten_data(df):\n temp_df = df.select(\"customerId\",explode(\"orders.basket\").alias(\"nbasket\"),\n col(\"orders.orderId\").alias(\"ordersId\"))\n\n final_df = (temp_df\n .select(\"customerId\",explode(\"ordersId\").alias(\"ordersId\"),\"nbasket\")\n .select(\"customerId\",\"ordersId\",explode(\"nbasket\").alias(\"new_topping\"))\n .select(\"customerId\",\"ordersId\",\"new_topping.*\")\n .withColumnRenamed(\"grossMerchandiseValueEur\", \"grossMerchandiseValueEur\")\n .withColumnRenamed(\"productId\", \"productId\")\n .withColumnRenamed(\"productType\", \"productType\")\n )\n \n\n return final_df", "def generate_monthly_frame(df_original, categories):\n\n df = df_original.copy()\n\n # -- groupby / pivot_table\n df = df.groupby(['customer_id', 'ym'])[categories].sum().reset_index()\n monthly_frame = pd.pivot_table(data=df,\n values=categories,\n index='customer_id',\n columns='ym',\n fill_value=0)\n\n return monthly_frame", "def create_aggregate_df():\n all_dates_df = pd.read_csv(\"datasets/all_dates_without_nan_df.csv\")\n aggregate_df = pd.DataFrame()\n\n tmp_date = first_date\n\n i = 0\n\n while tmp_date.date() < last_date.date():\n\n # add 20 lines for each interval\n while i < 20:\n aggregate_df = aggregate_df.append(\n {'Date': str(tmp_date)[0:10] + \" - \" + str(tmp_date + datetime.timedelta(days=delta - 1))[0:10],\n 'Stock Name': stock_columns[i]}\n , ignore_index=True)\n i += 1\n\n tmp_date = tmp_date + datetime.timedelta(days=delta)\n i = 0\n\n\n # create dummies for the stock names\n df_dummies = pd.DataFrame(data=pd.get_dummies(aggregate_df['Stock Name']))\n aggregate_df = aggregate_df.join(df_dummies)\n\n day_counter = 1\n\n # create delta columns for each day in the interval\n for i in range(1, delta + 1):\n aggregate_df['Day ' + str(day_counter)] = np.nan\n day_counter += 1\n\n i = 0\n tmp_date = first_date\n j = 0\n\n # add the relevant value of stock for each day\n while i < len(aggregate_df) and 0 <= (last_date.date() - tmp_date.date()).days:\n print(i)\n for day_counter in range(1, delta + 1):\n j = 0\n while j < 20:\n if 0 <= (last_date.date() - tmp_date.date()).days:\n col = [col for col in aggregate_df.columns if aggregate_df.loc[j, col] == 1]\n index = (tmp_date.date() - first_date.date()).days\n aggregate_df['Day ' + str(day_counter)][i + j] = all_dates_df.loc[index, col]\n j += 1\n else:\n break\n tmp_date = tmp_date + datetime.timedelta(days=1)\n i += j\n aggregate_df.to_csv('aggregate_df.csv')", "def _wide_to_long(data, data_type):\n if not data.columns.map(lambda x: issubclass(type(x), datetime.date)).any():\n raise ParameterError(\"Invalid table format. 
Must either have a 'date' column, or have dates as the columns.\")\n\n id_cols = [col for col in data.columns if not issubclass(type(col), datetime.date)]\n data = pd.melt(data, id_vars=id_cols, var_name=\"date\", value_name=data_type)\n\n id_cols.append(\"date\")\n data = data.set_index(id_cols)\n\n # Reorder index so date is first\n idx_names = list(data.index.names)\n idx_names.remove(\"date\")\n new_idx_name_order = [\"date\"] + idx_names\n data = data.reorder_levels(new_idx_name_order)\n\n # Convert index into just columns\n data = data.reset_index()\n\n return data", "def events_long_to_wide(self, events):\n # Make a copy of the dataframe #\n df = events.copy()\n # Convert the step to years #\n df['year'] = self.country.timestep_to_year(df['step'])\n # Drop the steps column #\n df = df.drop(columns=['step'])\n # We want to pivot on all columns except two #\n skip_cols = ['step', 'amount']\n cols = [col for col in events_cols if col not in skip_cols]\n # Reshape from long to wide format #\n df = df.pivot(index = cols,\n columns = 'year',\n values = 'amount')\n # We don't want the columns to be called 'year' though #\n df = df.rename_axis(columns=None)\n # Add 'amount_' in front of every column name #\n df = df.add_prefix('amount_')\n # Remove rows that are all NaNs #\n df = df.dropna(how='all')\n # Reset index #\n df = df.reset_index()\n # Sort entries #\n df = df.sort_values(cols)\n # Add the scenario column #\n df.insert(0, 'scenario', 'reference')\n # Return #\n return df", "def create_event_orders(self, events, sell_days, hold_days=5, amount=100):\n df_buy_events = pd.DataFrame(events, columns=[\"stock\"], index=events.index)\n df_buy_events[\"order\"] =\"Buy\"\n df_buy_events[\"year\"] = events.index.year\n df_buy_events[\"month\"] = events.index.month\n df_buy_events[\"day\"] = events.index.day\n df_buy_events[\"amount\"] = 100\n df_sell_events = pd.DataFrame(events,columns=[\"stock\"])\n df_sell_events.reset_index()\n df_sell_events.index = sell_days[0:events.shape(0)]\n #sell_days = df_sell_events.index + timedelta(days=hold_days)\n df_sell_events[\"order\"] =\"Sell\"\n df_sell_events[\"year\"] = df_sell_events.index.year\n df_sell_events[\"month\"] = df_sell_events.index.sell_days.month\n df_sell_events[\"day\"] = df_sell_events.index.sell_days.day\n #df_sell_events.index = sell_days\n df_sell_events[\"amount\"] = 100\n df_orders = df_sell_events.append(df_buy_events).sort_index()\n return df_orders", "def get_prices(self, fillna=0.):\n x = torch.from_numpy(self.prices_df.values).type(torch.get_default_dtype())\n x[torch.isnan(x)] = fillna\n x = x.repeat_interleave(7, dim=-1)[:, :self.calendar_df.shape[0]]\n assert x.shape == (self.num_timeseries, self.num_days)\n return x", "def merge(self):\n self.df = pd.merge(self.dfa,self.dfc,how='outer',left_index=True,right_index=True,suffixes=('_axys','_cust'))", "def load_prices(db, field_name, month, year) -> pd.DataFrame:\n assert db is not None\n assert len(field_name) > 0\n days_of_month = dates_of_month(month, year)\n rows = [{ 'asx_code': row['asx_code'],\n 'fetch_date': row['fetch_date'],\n 'field_name': field_name,\n 'field_value': clean_value(row[field_name])}\n for row in db.asx_prices.find({'fetch_date': { \"$in\": days_of_month},\n field_name: { \"$exists\": True }},\n {'asx_code': 1, field_name: 1, 'fetch_date': 1})\n ]\n if len(rows) == 0:\n df = pd.DataFrame(columns=['fetch_date', 'asx_code', field_name]) # return dummy dataframe if empty\n return df, rows\n # FALLTHRU\n df = pd.DataFrame.from_records(rows)\n df = 
df.pivot(index='fetch_date', columns='asx_code', values='field_value')\n #print(df)\n return df, rows", "def get_total_sales_daywise(self, type_of_plot):\r\n # Add Total Sales\r\n best_selling_day = self.all_data.groupby(self._day_of_week)[self._gross_sale].sum().reindex(self._ordered_day)\r\n\r\n # Reset Index\r\n best_selling_day = best_selling_day.reset_index()\r\n\r\n # Plot\r\n self.plot_data(type_of_plot, self._day_of_week, self._gross_sale, best_selling_day, \"Best Selling Day For The Year 2018\")", "def get_aggregated_ma_dollar_sales(self, level):\n prices = self.prices_df.fillna(0.).values.repeat(7, axis=1)[:, :self.sales_df.shape[1] - 5]\n df = (self.sales_df.iloc[:, 5:] * prices).T.rolling(28, min_periods=1).mean().T\n\n if level == self.aggregation_levels[-1]:\n x = df.values\n elif level == self.aggregation_levels[0]:\n x = df.sum().values[None, :]\n else:\n for g in level:\n df[g] = self.sales_df[g]\n\n df = df.groupby(level, sort=False).sum()\n x = df.values\n\n return torch.from_numpy(x).type(torch.get_default_dtype())", "def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize by setting the emotion and cause values
def __init__(self, emotion, cause, tweet, glove_size):
    self.emotion = emotion
    self.cause = cause
    self.tweet = tweet
    self.glove_size = glove_size
[ "def __init__(self, eman, game, entity_path=None):\n self.eman = eman\n self.game = game\n if entity_path:\n self.update_presets(entity_path)", "def initialize(self):\n if self.particle.params.T < self.particle.decoupling_temperature and not self.particle.in_equilibrium:\n self.particle.collision_integrals.append(self)\n\n if self.grids is None:\n self.grids = tuple([self.reaction[1].specie.grid, self.reaction[2].specie.grid])\n\n self.creaction = None\n self.cMs = None", "def initialise(self):\n\n self.__initialise_chomp_sound()\n self.__initialise_states()\n Character.initialise(self)", "def _setSpecificEmulationOptions(self):\n\n # CondDB usage by L0Muon emulator\n if self.isPropertySet(\"IgnoreL0MuonCondDB\"):\n log.info(\"L0Muon emulator will use the event TCK to get the FOI values : %s\"%(self.getProp(\"IgnoreL0MuonCondDB\")))\n l0muon = emulateL0Muon()\n l0muon.IgnoreCondDB = self.getProp(\"IgnoreL0MuonCondDB\")\n \n # TCK used by the L0Muon emulator \n if self.isPropertySet(\"L0MuonUseTCKFromData\"):\n log.info(\"L0Muon emulator will use the event TCK to get the FOI values : %s\"%(self.getProp(\"L0MuonUseTCKFromData\")))\n l0muon = emulateL0Muon()\n l0muon.UseTCKFromData = self.getProp(\"L0MuonUseTCKFromData\")\n \n # Set l0context for the emulation\n if self.isPropertySet(\"L0EmulatorContext\"):\n l0context = self.getProp(\"L0EmulatorContext\")\n log.info( \"The results of the L0 emulation will be written at location+%s\"%(l0context) )\n emulateL0Calo().L0Context = l0context\n emulateL0Muon().L0Context = l0context\n emulateL0DU().L0Context = l0context\n\n if self.isPropertySet(\"L0MuonForceLUTVersion\"):\n lutversion = self.getProp(\"L0MuonForceLUTVersion\")\n emulateL0Muon().LUTVersion = lutversion\n\n # Set electron emulation and Hcal threshold depending on data type\n if self.isPropertySet(\"DataType\"):\n datatype = self.getProp(\"DataType\")\n if datatype == \"2011\" or datatype == \"2010\" or datatype == \"2009\":\n emulateL0Calo().HcalThreshold = 0\n emulateL0Calo().EcalThreshold = 0\n elif datatype == \"2012\":\n emulateL0Calo().HcalThreshold = 8 # (default)\n emulateL0Calo().EcalThreshold = 5 # (default)\n if datatype == \"2010\" or datatype == \"2009\":\n emulateL0Calo().UseNewElectron = False\n else:\n emulateL0Calo().UseNewElectron = True\n\n if not self.isPropertySet(\"L0MuonForceLUTVersion\"):\n if datatype == \"2009\" or datatype == \"2010\" or datatype == \"2011\":\n emulateL0Muon().LUTVersion = \"V1\"\n elif datatype == \"2012\":\n emulateL0Muon().LUTVersion = \"V3\"\n elif datatype == \"2015\":\n emulateL0Muon().LUTVersion = \"V8\"", "def initialise(self):\n\n self.__initialise_chase_mode()\n self.__initialise_frightened_mode()\n Character.initialise(self)", "def express_current_emotion(self, *_args):\n\n # SETUP\n # Events.\n memory.unsubscribeToEvent(\"VAChanged\", self.getName())\n\n # Motion.\n motion_names = list()\n motion_times = list()\n motion_keys = list()\n \n # Eyes.\n eye_colour_lookup_table = [[(0xF82C35),(0xF82C35),(0xD55528),(0xD55528),(0xFF622B),(0xFF622B),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047)],\n [(0xF82C35),(0xF82C35),(0xD5542A),(0xD5542A),(0xE96A37),(0xFF8232),(0xFF8232),(0xFEB340),(0xFEB340),(0xFEB340),(0xFFFF00)],\n [(0xF62D35),(0xF62D35),(0xF62D35),(0xE96A37),(0xE96A37),(0xFF984D),(0xFF8232),(0xFDC147),(0xFFB144),(0xFFFF00),(0xFFFF00)],\n [(0xF72C32),(0xF72C32),(0xFF4048),(0xFE5761),(0xED8659),(0xFEB278),(0xFECE6A),(0xFECE6A),(0xFEE566),(0xFFFF00),(0xFFFF00)],\n 
[(0xF6255C),(0xF6255C),(0xF9386F),(0xFD585E),(0xF78C84),(0xFFB379),(0xFEDEA1),(0xFEE67C),(0xFFE564),(0xFFFF00),(0xFFFF00)],\n [(0xF6255C),(0xF93871),(0xF93871),(0xFE9EB9),(0xFE9EB9),(0xFFFFFF),(0xD0E7B3),(0xA5D277),(0x85B957),(0x6EAB34),(0x6EAB34)],\n [(0xA82C72),(0xA82C72),(0xC03381),(0xDB5CA1),(0xE8A1C3),(0xD1E5F0),(0xCFDADE),(0x73B8B3),(0x87B958),(0x6EAB34),(0x6EAB34)],\n [(0xA82C72),(0xA82C72),(0xC03381),(0x9C3F74),(0xB36893),(0xD1E4F2),(0x91C3E6),(0x91C3E6),(0x219A95),(0x00948E),(0x6BAC34)],\n [(0xA82C72),(0xA82C72),(0x86305D),(0x86305D),(0x94C8D6),(0x93C8D8),(0x92C2E6),(0x3196CE),(0x009591),(0x009591),(0x009591)],\n [(0xA62D72),(0x692850),(0x692850),(0x692850),(0x2D9DB1),(0x2C9FB2),(0x2F96CE),(0x0085BE),(0x00968D),(0x00968D),(0x00968D)],\n [(0x692850),(0x692850),(0x692850),(0x692850),(0x037F9B),(0x037F9B),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE)]\n ]\n\n # Speech.\n # Speech parameter lookup table. Format (pitch modifier, volume modifier)\n speech_parameter_lookup_table = [((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),\n ((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),\n ((1.00,0.75),(0.81,0.75),(0.00,0.00),(0.00,0.00),(-0.25,0.00),(0.50,1.00),(0.62,0.50),(0.75,),(0.75,),(0.75,0.75),(1.00,0.75)),\n ((1.00,0.50),(0.63,0.50),(-0.20,-0.50),(-1.00,-1.00),(-0.25,-0.50),(0.25,0.50),(0.25,0.50),(0.50,),(0.50,0.50),(0.50,0.50),(0.00,0.50)),\n ((1.00,0.25),(0.44,0.25),(0.40,-0.50),(0.30,-0.50),(0.25,-0.50),(0.25,0.00),(0.25,0.00),(0.25,0.25),(0.25,0.25),(0.25,0.25),(0.00,0.25)),\n ((1.00,0.00),(0.25,0.00),(0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.10,0.00),(0.10,0.00),(0.10,0.00),(0.00,0.00)),\n ((0.25,-0.25),(0.06,-0.25),(-0.10,-0.25),(-0.20,0.00),(-0.20,0.00),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),\n ((-0.25,-0.50),(-0.13,-0.50),(-0.35,-0.50),(-0.20,-0.25),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),\n ((-0.25,-0.75),(-0.31,-0.75),(-0.35,-0.75),(-0.10,-0.50),(-0.10,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),\n ((-0.50,-1.00),(-0.50,-1.00),(-0.40,-1.00),(-0.20,-0.75),(-0.10,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),\n ((-0.50,-1.00),(-0.50,-1.00),(-0.50,-1.00),(-0.25,-0.75),(0.00,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00))]\n \n # CALCULATIONS\n # Get current emotional values and generic calcs.\n current_emotion = memory.getData(\"Emotion/Current\")\n print \"current_emotion (module): \", current_emotion\n valence = current_emotion[0][0]\n arousal = current_emotion[0][1]\n emotion_name = current_emotion[3][0]\n # Valence and arousal are normalised between -1 and 1, with an axis intersection at (0, 0). Convert axis intersection\n # to index.\n valence_index = (int(valence * 5) + 5)\n arousal_index = 10 - (int(arousal * 5) + 5)\n\n # Speech.\n # The pitch and volume modifier values need scaled, final value to be determined. e.g. a value of 4 will divide the parameter by 4 to give a +/- of 25% of the default value\n speech_parameter_scaling_value = 4\n string_to_say = \"I am feeling \" + emotion_name\n scaled_pitch_modifier = 1 + (speech_parameter_lookup_table[arousal_index][valence_index][0] / speech_parameter_scaling_value)\n # NAO can only increase pitch! 
So need to check if a pitch reduction required and negate it. Range 1.0 - 4.0.\n if scaled_pitch_modifier < 1.0:\n scaled_pitch_modifier = 1.0\n # NAO volume (gain) range 0.0 - 1.0.\n scaled_volume_modifier = 0.5 + (speech_parameter_lookup_table[arousal_index][valence_index][1] / speech_parameter_scaling_value)\n self.tts.setParameter(\"pitchShift\", scaled_pitch_modifier)\n self.tts.setVolume(scaled_volume_modifier)\n \n # Eyes. \n hex_eye_colour = eye_colour_lookup_table[arousal_index][valence_index]\n eye_duration = 2.0\n\n # Motion.\n # Head pitch - inversely proportional to arousal.\n # Head pitch has a range of approx +0.5 to -0.5 radians so divide normalised arousal value by 2.\n head_pitch = arousal / 2 * -1\n\n motion_names.append(\"HeadPitch\")\n motion_times.append([0.5, 2, 4])\n motion_keys.append([0.0, head_pitch, 0.0])\n\n # Stance (torso position + arms) - directly proportional to valence\n # Shoulders have a pitch of +2 to -2 radians.\n # Used in absolute mode, central pitch value is 1.4radians.\n shoulder_pitch = 1.4 - valence * 0.5\n\n motion_names.append(\"LShoulderPitch\")\n motion_times.append([0.5, 2, 4])\n motion_keys.append([1.45726, shoulder_pitch, 1.45726])\n\n motion_names.append(\"RShoulderPitch\")\n motion_times.append([0.5, 2, 4])\n motion_keys.append([1.4, shoulder_pitch, 1.4])\n\n # Ankles have a pitch of approx +0.9 to -1.1radians.\n # Used in absolute mode, central pitch value is 0.08radians.\n ankle_pitch = 0.08 - valence * 0.05\n\n motion_names.append(\"LAnklePitch\")\n motion_times.append([0.5, 2, 4])\n motion_keys.append([0.08, ankle_pitch, 0.08])\n\n motion_names.append(\"RAnklePitch\")\n motion_times.append([0.5, 2, 4])\n motion_keys.append([0.08, ankle_pitch, 0.08])\n \n\n # OUTPUTS\n # Speech.\n self.tts.post.say(string_to_say)\n # Motion.\n self.motion.post.angleInterpolation(motion_names, motion_keys, motion_times, True)\n # Eyes. 
\n self.leds.fadeRGB(\"FaceLeds\", hex_eye_colour, eye_duration)\n time.sleep(5.0)\n self.leds.reset(\"FaceLeds\")\n\n\n # TIDY UP\n # Reset speech parameters to nominal.\n self.tts.setParameter(\"pitchShift\", 0)\n self.tts.setVolume(0.5)\n memory.subscribeToEvent(\"VAChanged\", self.getName(), \"express_current_emotion\")", "def __init__(self, env) -> None:\n gym.Wrapper.__init__(self, env)\n self._score = 0\n self.env.unwrapped.episode_rewards = []", "def initialize_episode(self, physics):\n # Random joint angles:\n randomizers.randomize_limited_and_rotational_joints(physics, self.random)\n # Random target position.\n close_target = self.random.rand() < .2 # Probability of a close target.\n target_box = .3 if close_target else 2\n xpos, ypos = self.random.uniform(-target_box, target_box, size=2)\n physics.named.model.geom_pos['target', 'x'] = xpos\n physics.named.model.geom_pos['target', 'y'] = ypos\n physics.named.model.light_pos['target_light', 'x'] = xpos\n physics.named.model.light_pos['target_light', 'y'] = ypos\n\n super(Swimmer, self).initialize_episode(physics)", "def init_emb(self):\n initrange = 0.5 / self.embedding_dim\n self.embeddings.weight.data.uniform_(-initrange, initrange)\n init.kaiming_normal(self.linear1.weight.data)\n self.linear1.bias.data.zero_()\n init.kaiming_normal(self.linear2.weight.data)\n self.linear2.bias.data.zero_()\n self.affine.weight.data.uniform_(-0, 0)\n self.affine.bias.data.zero_()", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n #agent가 생성될때마다 agentNum을 하나씩 증가시킨다.\n MyAgent.agentNum = MyAgent.agentNum+1", "def initialise(self):\n\n # I made these all separate functions to reduce confusion\n self.__initialise_direction()\n self.__initialise_grid_position()\n self.__initialise_image()\n self._initialise_bindings()\n self.update_character_size()", "def __init__(self, id, E_0):\n Material.__init__(self, id, E_0, None)", "def setUp(self):\n (X, Y, Z) = ss.get_total_spin(4, 2**4)\n H_target = ss.get_H_WHH_0(X, Y, Z, delta=500)\n self.env = spin_sys_discrete.SpinSystemDiscreteEnv(\n N=4, dim=2**4, coupling=1e3, delta=500, H_target=H_target,\n delay_after=True, episode_length=5\n )", "def init_emb(self):\n initrange = 0.5 / self.embedding_dim\n self.embeddings.weight.data.uniform_(-initrange, initrange)\n init.kaiming_normal(self.linear1.weight.data)\n self.linear1.bias.data.zero_()\n self.affine.weight.data.uniform_(-0, 0)\n self.affine.bias.data.zero_()", "def __init__(self):\n this = _coin.new_SoMotion3Event()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, mts=MotionSensor(MOTION_INPUT)):\n self.__mts = mts", "def __init__(self):\n self._red_led = pyb.LED(1) # Turns led on (red color)\n self._red_led.on()\n # Setup sensor settings\n # https://docs.openmv.io/library/omv.sensor.html#constants\n sensor.reset()\n sensor.set_vflip(True) # Reverse image on vertical axis\n sensor.set_hmirror(True) # Reverse image on horizontal axis\n sensor.set_pixformat(sensor.RGB565)\n sensor.set_framesize(sensor.QVGA)\n sensor.set_auto_gain(False) # Must be turned off for color tracking\n # Must be turned off for color tracking\n sensor.set_auto_whitebal(False)", "def __init__(self, win, position):\n red = randint(0,255)\n green = randint(0,255)\n blue = randint(0,255)\n\n # body\n p1 = Point(position.getX()-40, position.getY()-20 )\n p2 = Point(position.getX()+40, position.getY()+20)\n self.body = Oval( p1, p2 )\n self.body.setFill(color_rgb(red, green, blue))\n\n # tail\n p1 = Point(position.getX()+30, 
position.getY()-30)\n p2 = Point(position.getX()+50, position.getY()+30)\n self.tail = Oval( p1, p2 )\n self.tail.setFill( \"black\" )\n\n # eye\n center2 = Point( position.getX()-15, position.getY()-5)\n self.eye_level = center2.getY()\n self.eye = Circle( center2, 5 )\n self.eye.setFill( \"black\" )", "def setup(self):\r\n linedetector = LineDetector()\r\n lightdetector = LightDetector()\r\n threatdetector = ThreatDetector()\r\n\r\n # Create behaviours\r\n self.sensobs = [linedetector, lightdetector, threatdetector]\r\n self.whiteline = Whiteline(self, linedetector)\r\n self.moving_object = Moving_object(self, threatdetector)\r\n self.redgreenlight = Redgreenlight(self, lightdetector, linedetector)\r\n\r\n # Updates sensobs\r\n linedetector.add_behavior(self.whiteline)\r\n linedetector.add_behavior(self.redgreenlight)\r\n lightdetector.add_behavior(self.redgreenlight)\r\n threatdetector.add_behavior(self.moving_object)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate context score with GLoVe embedding
def calc_glove_score(self, context):
    context_embedding = np.full(self.glove_size, 1.e-28)
    for word in context:
        if word in Seed.glove_embeddings.keys():
            word_vec = np.array(Seed.glove_embeddings[word])
            context_embedding += word_vec
    return context_embedding
[ "def encode_glove_average(X_train , X_test , embedding_dim , word_index):\r\n import os\r\n import numpy as np\r\n #Embedding the vector in this step\r\n EMBEDDING_DIM = embedding_dim\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n \r\n #This is very time consuming, do not run again and again\r\n GLOVE_DIR = 'C:/Users/Nipun.Puri/Desktop/wordEmbed/'\r\n\r\n embeddings_index = {} #dictionary keys - words and values are the 100 dimension vector\r\n f = open(os.path.join(GLOVE_DIR, FILE_NAME) , encoding=\"utf8\")\r\n for line in f:\r\n values = line.split() #each line in the glove text file\r\n word = values[0] #The thing on the 0th position is the word\r\n coefs = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = coefs\r\n f.close()\r\n\r\n print('Found %s word vectors.' % len(embeddings_index))\r\n \r\n embedding_matrix = np.ranom.rand(len(word_index) + 1, EMBEDDING_DIM) / 100\r\n for word, i in word_index.items():\r\n embedding_vector = embeddings_index.get(word)\r\n if embedding_vector is not None:\r\n # words not found in embedding index will be the same randomly intialized, better than zero\r\n embedding_matrix[i] = embedding_vector\r\n \r\n print(\"The dimention of the embedding Matrix should be : (nx , EMBEDDING_DIM)\")\r\n print(\"And it actually is: \" + str(embedding_matrix.shape))\r\n \r\n import itertools\r\n\r\n zeroList = np.zeros((1,EMBEDDING_DIM)).tolist()[0]\r\n x_train_glove = np.zeros((X_train.shape[0] , EMBEDDING_DIM))\r\n row = 0\r\n for document in X_train:\r\n #Looping over each Tweet\r\n vectorTemp = np.zeros((1,EMBEDDING_DIM)) #initializing the vector representation of the tweet\r\n #This will become a 1 x 300 vector\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp = vectorTemp + np.zeros((1,EMBEDDING_DIM))\r\n else:\r\n vectorTemp = vectorTemp + np.array(embedding_matrix[word , :])\r\n vectorTemp = vectorTemp / 30\r\n #And this helps us in decomposing it\r\n x_train_glove[row] = vectorTemp\r\n row+=1\r\n\r\n x_test_glove = np.zeros((X_test.shape[0] , EMBEDDING_DIM))\r\n row = 0\r\n for document in X_test:\r\n #Looping over each Tweet\r\n vectorTemp = np.zeros((1,EMBEDDING_DIM)) #initializing the vector representation of the tweet\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp = vectorTemp + np.zeros((1,EMBEDDING_DIM))\r\n else:\r\n vectorTemp = vectorTemp + np.array(embedding_matrix[word , :])\r\n vectorTemp = vectorTemp / 30\r\n #And this helps us in decomposing it\r\n x_test_glove[row] = vectorTemp\r\n row+=1\r\n\r\n print(\"Shape of Training set now is: \" + str(x_train_glove.shape))\r\n print(\"Shape of Test set now is: \" + str(x_test_glove.shape))\r\n \r\n return (x_train_glove , x_test_glove)", "def glove_layer(features, mode, params, scope=None):\n if 'context_vecs' in features:\n tf.logging.info('Words are pre-embedded')\n # TODO(ddohan): Consider an explicit \"use_tf_data\" flag\n xw, qw = tfdata_emb_layer(features)\n return None, xw, qw\n else:\n tf.logging.info('Doing embeddings in graph.')\n tf.logging.info('# Glove layer')\n with tf.variable_scope(scope or 'glove_layer'):\n training = mode == tf.estimator.ModeKeys.TRAIN\n\n # The first two values are for UNK & PAD\n emb_mat_const = features['emb_mat'][2:, :]\n emb_mat_var = tf.get_variable('glove_emb_mat_var',\n [2, emb_mat_const.get_shape()[1]])\n emb_mat = tf.concat([emb_mat_var, emb_mat_const], 0)\n\n if training:\n emb_mat = 
tf.nn.dropout(\n emb_mat,\n keep_prob=1.0 - params['word_embedding_dropout'],\n noise_shape=[emb_mat.shape.as_list()[0], 1])\n\n xv = tf.nn.embedding_lookup(emb_mat,\n features['glove_indexed_context_words'])\n qv = tf.nn.embedding_lookup(emb_mat,\n features['glove_indexed_question_words'])\n return emb_mat, xv, qv", "def forward(self, context_embed: torch.Tensor, reply_embed: torch.Tensor) -> torch.Tensor:\n \n assert context_embed.size(0) == reply_embed.size(0)\n true_label = torch.arange(context_embed.size(0)).to(self.device)\n total_size = context_embed.size(0)\n\n if self.loss_type=='dot':\n \"\"\"\n No negative sampling applied\n \"\"\"\n similarity = torch.matmul(context_embed, reply_embed.transpose(1,0))\n # true_label = torch.arange(sim.size(0))\n loss = F.cross_entropy(input=similarity, target=true_label)\n correct_count = similarity.argmax(-1).eq(true_label).sum().item()\n correct_count_for_recall = true_label.eq(similarity.argmax(-1)).sum().item()\n predict_label = similarity.argmax(-1).tolist()\n\n return loss, [correct_count, correct_count_for_recall], total_size, predict_label", "def embedding_similarity_score(sentence1, sentence2):\n \n # Process text - extract POS and embeddings\n doc1 = nlp(unicode(sentence1))\n doc2 = nlp(unicode(sentence2))\n \n # Get a list of tokens, only for those tokens which are not stopwords or punctuation\n tokens1 = [token for token in doc1 if token.text not in stops and token.pos_ != u'PUNCT']\n tokens2 = [token for token in doc2 if token.text not in stops and token.pos_ != u'PUNCT']\n \n # accumulate the Cosine similiarities between vectors, and number of matched vectors. \n score1, count1, score2, count2 = 0.0, 0, 0.0, 0 \n \n # For each word in the first sentence\n for tok1 in tokens1:\n try:\n # Get the similarity value of the most similar word in the other sentence\n best_score = max([tok1.similarity(tok2) for tok2 in tokens2])\n except Exception as e:\n best_score = None\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score1 += best_score\n count1 += 1\n \n for tok2 in tokens2:\n try:\n # Get the similarity value of the most similar word in the other sentence\n best_score = max([tok2.similarity(tok1) for tok1 in tokens1])\n except Exception as e:\n best_score = None\n # Check that the similarity could have been computed\n if best_score is not None:\n score2 += best_score\n count2 += 1\n \n try:\n # Average the values and add score from both sides to get symmetic distance\n score = .5*(score1/count1 + score2/count2)\n return(score)\n except:\n return(None)", "def _batch_tactic_scores(self, goal_embeddings):\n return jp._batch_tactic_scores(self, goal_embeddings)", "def calculate_similarity(source_doc,\n target_doc,\n embedding=\"Glove\",\n threshold=0):\n def w2v_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = [i.lower().split() for i in doc]\n word_list = []\n for w in doc:\n w = [word for word in w if word not in stopwords.words('english')]\n word_list.append(w)\n vec_list = []\n for words in word_list:\n word_vecs = []\n for word in words:\n try:\n vec = w2v_model[word]\n word_vecs.append(vec)\n except KeyError:\n pass\n vector = np.mean(word_vecs, axis=0)\n vec_list.append(vector)\n vectors = np.mean(vec_list, axis=0)\n return vectors\n\n def glove_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = [i.lower().split() for i in doc]\n word_list = []\n for w in doc:\n w = [word for word in w if 
word not in stopwords.words('english')]\n word_list.append(w)\n vec_list = []\n for words in word_list:\n word_vecs = []\n for word in words:\n try:\n vec = glove_model[word]\n word_vecs.append(vec)\n except KeyError:\n pass\n vector = np.mean(word_vecs, axis=0)\n vec_list.append(vector)\n vectors = np.mean(vec_list, axis=0)\n return vectors\n\n def fasttext_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = \" \".join(doc)\n doc = doc.lower()\n words = [w for w in doc.split(\" \")]\n word_vecs = []\n for word in words:\n try:\n vec = fasttext_model[word]\n word_vecs.append(vec)\n except KeyError:\n # Ignore, if the word doesn't exist in the vocabulary\n pass\n vector = np.mean(word_vecs, axis=0)\n return vector\n\n def cosine_sim(vecA, vecB):\n \"\"\"Find the cosine similarity distance between two vectors.\"\"\"\n csim = np.dot(vecA,\n vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))\n if np.isnan(np.sum(csim)):\n return 0\n return csim\n\n if embedding == \"Word2Vec\":\n w2v_model = KeyedVectors.load_word2vec_format(\n 'Semantic_Similarity/Word_Embedding/data/GoogleNews-vectors-negative300.bin',\n binary=True,\n )\n source_vec = w2v_vectorize(source_doc)\n target_vec = w2v_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score\n\n elif embedding == \"Glove\":\n source_vec = glove_vectorize(source_doc)\n\n target_vec = glove_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score\n\n elif embedding == \"FastText\":\n fasttext_model = FastText.load_fasttext_format(\n 'Semantic_Similarity/Word_Embedding/data/cc.en.300.bin')\n source_vec = fasttext_vectorize(source_doc)\n target_vec = fasttext_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score", "def compute_subtract_context(\n subtract_context, target_rep, context_token_reps_excl_target\n):\n # sum over context minus target word\n sum_context_without_target = np.sum(context_token_reps_excl_target, axis=0)\n\n if subtract_context:\n averaged_context_without_target = sum_context_without_target / len(\n sum_context_without_target\n )\n\n sentence_vec = np.subtract(target_rep, sum_context_without_target)\n\n else:\n sentence_vec = np.add(target_rep, sum_context_without_target)\n sentence_vec /= len(sum_context_without_target) + 1\n\n return sentence_vec", "def extrace_advanced_features(word_vec,cluster_model,message,edited_message,*arg):\n\tnumber_of_clusters = len(cluster_model.cluster_centers_)\n\tvocab = word_vec.wv.vocab\n\tfeatures = [0]*(number_of_clusters)\n\twords = re.split('[^a-z0-9]',edited_message.lower())\n\twords = filter(lambda x: x != '', words)\n\tif len(arg)!=0:\n\t\twords = english_stemmer(words)\n\tfor word in words:\n\t\tif word in vocab:\n\t\t\tcluster_number = cluster_model.predict(np.array(word_vec.wv[word]).reshape(1,-1))[0]\n\t\t\tfeatures[cluster_number]=features[cluster_number]+1\n\tfeatures.append(number_of_spaced_words(edited_message))\n\tfeatures.append(caps_ratio(message))\n\tfeatures.append(symbole_density(message))\n\tfeatures.append(len(message))\n\treturn np.array(features)", "def _score_based_softmax(self, x, v_dim, embedding, z, language):\n full_set = torch.arange(0, v_dim, device=self.device, dtype=torch.long)\n full_set_embedded = embedding(full_set)\n # [V x D]\n\n batch_embeddings = embedding(x)\n if language == \"en\":\n batch_score = (z * 
batch_embeddings).sum(dim=2)\n # [B x S_e], dot product between every english word and latent in B x S_e\n else:\n batch_score = torch.bmm(batch_embeddings, z.transpose(1, 2))\n # [B x S_f x S_e], dot product between every french word an every english latent in B sentences\n\n full_set_score = torch.matmul(z, full_set_embedded.transpose(0, 1))\n # [B x S_e x V], dot product between every english latent in B sentences with every word in V\n\n u = torch.max(full_set_score, dim=2)[0]\n # [B x S_e]\n\n # Compute stable exponentials\n if language == \"en\":\n batch_score = torch.exp(batch_score - u)\n else:\n batch_score = torch.exp(batch_score - u.unsqueeze(1))\n full_set_score = torch.exp(full_set_score - u.unsqueeze(2)).sum(dim=2)\n # [B x S_e]\n\n if language == \"en\":\n return batch_score / full_set_score\n # [B x S_e]\n else:\n return batch_score / full_set_score.unsqueeze(1)\n # [B x S_f x S_e]", "def words_avg_embedding(words: list, glove):\n\n word_embeddings = map(partial(get_word_vec, glove=glove), words)\n sum_words_embedding = reduce(np.add, word_embeddings)\n return sum_words_embedding / len(words)", "def _initialize_from_glove(glove_dict, token2id, embedding_dims):\n\n embeddings = np.zeros((len(token2id), embedding_dims), dtype=np.float32)\n for multi_word_token, token_id in token2id.items():\n total = 0\n for word in multi_word_token.split(' '):\n if word in glove_dict:\n embeddings[token_id] += glove_dict[word]\n total += 1\n assert total > 0 or multi_word_token == 'OOV'\n embeddings[token_id] /= max(1e-6, total)\n return embeddings", "def __init__(self, \n tokenizer:Tokenizer, \n max_cardinality:int =-1, \n vocab_size:int =20000, \n max_sequence_length:int =1000, \n glove_file_path:str = './Datasets/GloVeWordVectors/glove.6B/glove.6B.100d.txt', \n output_dim:int =100,\n batch_size:int = 1,\n show_feedback:bool =False): \n try:\n print('~~~~~~~~ Init Embedding GloVe ~~~~~~~~~') \n self.GLOVE_DIR = glove_file_path\n print('GloVe file:\\t\\t=> ', self.GLOVE_DIR)\n\n self.EMBEDDING_DIM = output_dim if (output_dim > 0) else 100\n print('Output dimension:\\t=> ', self.EMBEDDING_DIM)\n\n self.MAX_SEQUENCE_LENGTH = max_sequence_length if (max_sequence_length > 0) else 1000\n print('Input/padding:\\t\\t=> ', self.MAX_SEQUENCE_LENGTH)\n \n self.batch_input_shape=(batch_size, self.MAX_SEQUENCE_LENGTH)\n\n self.MAX_NUM_WORDS = vocab_size if (vocab_size > 0) else 20000\n print('Vocab size:\\t\\t=> ', self.MAX_NUM_WORDS)\n\n self.max_cardinality = max_cardinality\n print('Max nodes cardinality:\\t=> ', self.max_cardinality)\n\n self.tokenizer = tokenizer\n print('Tokenizer: \\t\\t=> reloaded')\n\n self.show_response = show_feedback\n\n print('~~~~~~ Collect Embedding Indices ~~~~~~')\n self.embedding_indices = self.LoadGloVeEmbeddingIndices()\n print('Loaded word vectors:\\t=> ', len(self.embedding_indices))\n\n except Exception as ex:\n template = \"An exception of type {0} occurred in [GloVeEmbeddingLayer.Constructor]. 
Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)", "def extract_vectors(context, index_target, language_model, vocab, to_extract, cuda = False):\n data_context, index_target, word = prepare_input(context, index_target, vocab, mode = 'bidir', cuda = cuda)\n\n word_emb = language_model.encoder.embedding.weight.data[vocab.word2idx[word]]\n\n hidden = language_model.init_hidden(1)\n\n # Extract hidden layers (current and predictive) for each layer\n predictive_hidden_layers, hidden_layers = language_model.extract_hidden_layers(data_context, hidden, index_target)\n\n extracted_vectors = {i:[] for i in to_extract}\n\n for vec in to_extract:\n if 'hidden' in vec:\n n_layer = int(vec[-1]) - 1\n if 'current' in vec:\n toadd = hidden_layers[n_layer]\n elif 'predictive' in vec:\n toadd = predictive_hidden_layers[n_layer]\n if vec == 'wordemb':\n toadd = word_emb\n if vec == 'avg_context':\n\n to_avg = []\n window = 10\n\n start = index_target - window / 2\n end = index_target + window / 2\n if end >= len(data_context[0]):\n start = start - (end - len(data_context[0]))\n end = len(data_context[0])\n if start < 0:\n end = end - start + 1\n start = 0\n window = data_context[0][int(start):int(end)]\n x = []\n for token in window:\n #Skip unknown words, end of sentence symbols and punctuation\n if vocab.idx2word[token] != \"<unk>\" and vocab.idx2word[token] != \"<eos>\" and vocab.idx2word[token] not in string.punctuation + '’”“':\n to_avg.append(language_model.encoder.embedding.weight.data[token].cpu().detach().numpy())\n x.append(vocab.idx2word[token])\n toadd = np.average(to_avg, axis = 0)\n toadd = torch.tensor(toadd).cuda() if cuda else torch.tensor(toadd)\n toadd = toadd.squeeze()\n toadd = toadd.cpu().detach().numpy()\n extracted_vectors[vec] = toadd\n return extracted_vectors", "def encode_glove_unigram(X_train , X_test , embedding_dim , word_index):\r\n import os\r\n import itertools\r\n import numpy as np\r\n \r\n \r\n #Embedding the vector in this step\r\n EMBEDDING_DIM = embedding_dim\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n \r\n #This is very time consuming, do not run again and again\r\n GLOVE_DIR = '/emdedding/'\r\n\r\n embeddings_index = {} #dictionary keys - words and values are the 100 dimension vector\r\n embeddings_index = {} #dictionary keys - words and values are the 100 dimension vector\r\n f = open(os.path.join(GLOVE_DIR, FILE_NAME) , encoding=\"utf8\")\r\n for line in f:\r\n values = line.split() #each line in the glove text file\r\n word = values[0] #The thing on the 0th position is the word\r\n coefs = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = coefs\r\n f.close()\r\n\r\n print('Found %s word vectors.' 
% len(embeddings_index))\r\n \r\n embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\r\n \r\n notFoundCounter = 0\r\n for word, i in word_index.items():\r\n embedding_vector = embeddings_index.get(word)\r\n \r\n if embedding_vector is not None:\r\n # words not found in embedding index will be all-zeros.\r\n embedding_matrix[i] = embedding_vector\r\n else:\r\n notFoundCounter+=1\r\n \r\n print(\"Vectors not found: \" +str(notFoundCounter))\r\n \r\n print(\"The dimention of the embedding Matrix should be : (nx , EMBEDDING_DIM)\")\r\n print(\"And it actually is: \" + str(embedding_matrix.shape))\r\n \r\n \r\n zeroList = np.zeros((1,EMBEDDING_DIM)).tolist()[0]\r\n x_train_glove = np.zeros((X_train.shape[0] , EMBEDDING_DIM * 30))\r\n row = 0\r\n for document in X_train:\r\n #Looping over each Tweet\r\n vectorTemp = [] #initializing the vector representation of the tweet\r\n #This will become a 1 x 300 vector\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp.append(zeroList)\r\n else:\r\n vectorTemp.append(embedding_matrix[word , :].tolist())\r\n vectorTemp = list(itertools.chain.from_iterable(vectorTemp))#The original vectorTemp is a list of lists.\r\n #And this helps us in decomposing it\r\n x_train_glove[row] = vectorTemp\r\n row+=1\r\n\r\n x_test_glove = np.zeros((X_test.shape[0] , EMBEDDING_DIM * 30))\r\n row = 0\r\n for document in X_test:\r\n #Looping over each Tweet\r\n vectorTemp = [] #initializing the vector representation of the tweet\r\n #This will become a 1 x 300 vector\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp.append(zeroList)\r\n else:\r\n vectorTemp.append(embedding_matrix[word , :].tolist())\r\n vectorTemp = list(itertools.chain.from_iterable(vectorTemp))#The original vectorTemp is a list of lists.\r\n #And this helps us in decomposing it\r\n x_test_glove[row] = vectorTemp\r\n row+=1\r\n\r\n print(\"Shape of Training set now is: \" + str(x_train_glove.shape))\r\n print(\"Shape of Test set now is: \" + str(x_test_glove.shape))\r\n \r\n return (x_train_glove , x_test_glove)", "def compute_bert_tokenized_rep(\n context,\n target_word,\n raw_bert,\n tf_idf_weighting,\n tf_idf_dicts,\n context_idx,\n subtract_context,\n bert_cls,\n):\n\n if bert_cls:\n excluded_features = [\"SEP]\"]\n else:\n excluded_features = [\"[CLS]\", \"[SEP]\"]\n\n raw_context_rep = eval(raw_bert)\n features = raw_context_rep[\"features\"]\n\n context_token_reps_excl_target = []\n\n target_rep = None\n\n # for each word, average hidden layers\n for feature in features:\n token = feature[\"token\"]\n # print(token)\n\n if token not in excluded_features:\n if bert_cls and token == \"[CLS]\":\n cls_rep = get_bert_token_rep(\n token,\n feature,\n tf_idf_weighting,\n tf_idf_dicts,\n context_idx,\n bert_cls,\n )\n elif token != target_word.lower(): # because BERT is uncased\n token_averaged_layers = get_bert_token_rep(\n token, feature, tf_idf_weighting, tf_idf_dicts, context_idx\n )\n context_token_reps_excl_target.append(token_averaged_layers)\n else:\n target_rep = get_bert_token_rep(\n token, feature, tf_idf_weighting, tf_idf_dicts, context_idx\n )\n\n if target_rep is not None:\n sentence_vec = compute_subtract_context(\n subtract_context, target_rep, context_token_reps_excl_target\n )\n if bert_cls:\n # concat cls token representation to sentence vector\n sentence_vec = np.concatenate((sentence_vec, cls_rep), axis=0)\n\n else:\n print(\"No BERT rep for the target word\")\n sentence_vec = None\n\n return sentence_vec", "def 
learn_embeddings(walks, pos_train_graph, w2v_model):\n\n worddictionary = pos_train_graph.get_node_to_index_map()\n reverse_worddictionary = pos_train_graph.get_index_to_node_map()\n\n if w2v_model.lower() == \"skipgram\":\n logging.info(\"SkipGram analysis \")\n model = SkipGramWord2Vec(walks,\n worddictionary=worddictionary,\n reverse_worddictionary=reverse_worddictionary, num_epochs=args.num_epochs,\n learning_rate= args.learning_rate,\n embedding_size=args.embedding_size, context_window=args.context_window)\n elif w2v_model.lower() == \"cbow\":\n logging.info(\"CBOW analysis \")\n model = ContinuousBagOfWordsWord2Vec(walks,\n worddictionary=worddictionary,\n reverse_worddictionary=reverse_worddictionary, num_epochs=args.num_epochs,\n learning_rate= args.learning_rate,\n embedding_size=args.embedding_size, context_window=args.context_window)\n elif w2v_model.lower() == \"glove\":\n logging.info(\"GloVe analysis \")\n n_nodes = pos_train_graph.node_count()\n cencoder = CooccurrenceEncoder(walks, window_size=2, vocab_size=n_nodes)\n cooc_dict = cencoder.build_dataset()\n model = GloVeModel(co_oc_dict=cooc_dict, vocab_size=n_nodes, embedding_size=args.embedding_size,\n context_size=args.context_window, num_epochs=args.num_epochs)\n else:\n raise ValueError('w2v_model must be \"cbow\", \"skipgram\" or \"glove\"')\n\n model.train()\n\n write_embeddings(args.embed_graph, model.embedding, reverse_worddictionary)", "def evaluate_features():\n # training set is from Stanford Sentiment Training Set\n training_set = parse_stanford(\"data/stanfordSentimentTreebank/stanfordSentimentTreebank/dictionary.txt\", \n \"data/stanfordSentimentTreebank/stanfordSentimentTreebank/sentiment_labels.txt\")\n # train weights for maxent model\n weights = train_maxent(training_set)\n # sort weights in descending order\n sorted_weights = { sentiment: sorted(weights[sentiment].iteritems(), \n key=lambda x:x[1], \n reverse=True) \n for sentiment in weights}\n\n # evaluate model for the top i weights, in this range (There should be # ~130000 weights total)\n for i in range(10000, 130000, 10000):\n # get the top i weights\n new_weights = {\"positive\": {}, \"negative\": {}, \"neutral\": {}}\n for sentiment in sorted_weights:\n new_weights[sentiment] = {w[0]:weights[sentiment][w[0]] \n for w in sorted_weights[sentiment][:i-1]}\n\n # load the episode that has gold standard features already assigned\n episode = parse_goldstandard(\"data/s1e9_gold.txt\", 1, 9)\n # calculate bag of words sentiments\n word_sentiments = parse_NRC(\"data/NRC-Emotion-Lexicon-v0.92/NRC-Emotion-Lexicon-v0.92/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt\")\n bag_of_words(episode, word_sentiments)\n # calculate maxent sentiments\n run_maxent(episode, new_weights)\n\n # evaulate maxent and bag_of_words sentiments against baseline\n print \"%s max_ent vs gold: %s\" % (i, compare_scores(episode, \n score1=\"maxent_score\", \n score2=\"gold_score\"))\n print \"%s bow vs gold: %s\" % (i, compare_scores(episode, \n \"bow_score\", \n score2=\"gold_score\"))", "def score(self, current):\n\t\tlogprob = 0.0\n\t\tlm_state = self.lm.begin()\n\t\tfor pair in current:\n\t\t\tif pair.phrase != None and pair.fPhrase != None:\n\t\t\t\tlogprob += pair.phrase.logprob\n\t\t\t\tfor word in pair.phrase.english.split():\n\t\t\t\t\t(lm_state, word_logprob) = self.lm.score(lm_state, word)\n\t\t\t\t\tlogprob += word_logprob\n\n\t\t\t\tlogprob -= self.distortionProb(pair.tgtStartPos, pair.tgtEndPos)\n\t\tlogprob += self.lm.end(lm_state)\n\t\t# print 'score : ' + 
str(logprob)\n\t\treturn logprob", "def logprob(self, prior_context, target_word):\n\n # dealing with unseen words\n if prior_context not in self.words_dict:\n prior_context = \"<UNK>\"\n if target_word not in self.words_dict:\n target_word = \"<UNK>\"\n\n # for unseen combinations: default dicts have default value of 0.0\n bigram_count = self.bigram_dict[(prior_context, target_word)]\n context_count = self.words_dict[prior_context]\n\n\n # add 0.25 smoothing for out-of-vocabulary words\n prob = (bigram_count + 0.25) / (context_count + 0.25 * len(self.token_list))\n\n return np.log(prob) / np.log(2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the context before the given relation (emotion or cause)
def get_context_before(self, reln1):
    before = self.tweet.tokens[0:reln1[0].idx-1]
    if before:
        self.bef = self.calc_glove_score(before)
    else:
        self.bef = np.full(self.glove_size, 1.e-28)
[ "def get_prev_sentence(self, sentence):\n for prev_sentence, sentence_node, relation_type in self.graph.in_edges(sentence[\"id\"], keys=True):\n if relation_type == self.sentence_order_edge_type:\n return self.graph.node[prev_sentence]\n return None", "def context(self):\n return self.scenario.context", "def _getDefaultReviewContext(self, entity, org_admin,\n mentor):\n\n from soc.modules.gsoc.logic.models.proposal_duplicates import logic \\\n as duplicates_logic\n from soc.modules.gsoc.logic.models.review_follower import logic as \\\n review_follower_logic\n\n context = {}\n\n context['student'] = entity.scope\n context['student_name'] = entity.scope.name()\n\n if entity.mentor:\n context['mentor_name'] = entity.mentor.name()\n else:\n context['mentor_name'] = \"No mentor assigned\"\n\n # set the possible mentors in the context\n possible_mentors = entity.possible_mentors\n\n if not possible_mentors:\n context['possible_mentors'] = \"None\"\n else:\n mentor_names = []\n\n for mentor_key in possible_mentors:\n possible_mentor = mentor_logic.logic.getFromKeyName(\n mentor_key.id_or_name())\n mentor_names.append(possible_mentor.name())\n\n context['possible_mentors'] = ', '.join(mentor_names)\n\n # update the reviews context\n self._updateReviewsContext(context, entity)\n\n # update the scores context\n self._updateScoresContext(context, entity)\n\n if mentor:\n context['is_mentor'] = True\n if not entity.mentor or entity.mentor.key() != mentor.key():\n # which button to (un)propose yourself as mentor should we show\n if mentor.key() in possible_mentors:\n # show \"No longer willing to mentor\"\n context['remove_me_as_mentor'] = True\n else:\n # show \"I am willing to mentor\"\n context['add_me_as_mentor'] = True\n\n if org_admin:\n context['is_org_admin'] = True\n\n # when the duplicates can be visible obtain the\n # duplicates for this proposal\n if entity.program.duplicates_visible:\n fields = {'student': entity.scope,\n 'is_duplicate': True}\n\n duplicate_entity = duplicates_logic.getForFields(fields, unique=True)\n\n if duplicate_entity:\n # this list also contains the current proposal\n # entity, so remove it\n duplicate_keys = duplicate_entity.duplicates\n duplicate_keys.remove(entity.key())\n context['sp_duplicates'] = db.get(duplicate_keys)\n\n user_entity = user_logic.logic.getCurrentUser()\n\n # check if the current user is subscribed to public or private reviews\n fields = {'scope': entity,\n 'user': user_entity,}\n follower_entity = review_follower_logic.getForFields(fields, unique=True)\n\n if follower_entity:\n # pylint: disable=E1103\n context['is_subscribed'] = follower_entity.subscribed_public\n\n return context", "def _get_context(self, question):\n context = []\n element = question.getprevious()\n while element is not None and element.tag == 'p':\n context.append(element)\n element = element.getprevious()\n if context:\n context.reverse()\n return context", "def user32_GetPropertyInteractionContext(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"interactionContext\", \"contextProperty\", \"value\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_mention_head(mention, parser, graph):\r\n distances_to_root = []\r\n curr_head_and_pos = []\r\n sentence = graph.sentences[mention.sentence_id]\r\n\r\n joined_sentence = ' '.join(sentence)\r\n parser.parse(joined_sentence)\r\n\r\n for index in mention.indices:\r\n child = parser.get_word(index)\r\n child_lemma = parser.get_lemma(index)\r\n child_pos = 
parser.get_pos(index)\r\n head = parser.get_word(parser.get_head(index))\r\n\r\n if parser.get_head(index) in mention.indices and head != child:\r\n continue\r\n\r\n distances_to_root.append(get_distance_to_root(index, parser))\r\n curr_head_and_pos.append((child_lemma, child_pos))\r\n\r\n # Get the closest to the root\r\n best_index = np.argmin(distances_to_root)\r\n curr_head, curr_pos = curr_head_and_pos[best_index]\r\n\r\n return curr_head, curr_pos", "def get_parent(entity):\n return getattr(entity, meta.PARENT_IDENTIFIER, None)", "def get_entity_property_deps(parse, question_type):\n\n # Init ent and prop\n ent = []\n prop = []\n\n # Extract sentence structure\n lemmas = []\n pos = []\n dep = []\n for word in parse:\n lemmas.append(word.lemma_)\n pos.append(word.pos_)\n dep.append(word.dep_)\n\n sent = parse.text.replace(\"?\", \"\") # Strip question mark\n sent = sent.replace('\"', \"\") # Strip double apostrophe\n sent = sent.replace(\"'\", \"\") # Strip single apostrophe\n\n if question_type == \"XofY\":\n # Property: from AUX to ADP\n if pos.count(\"ADP\") == 1:\n try:\n prop = lemmas[pos.index('AUX') + 1:pos.index('ADP')]\n if len(prop) > 1:\n if \"the\" in prop: # strip 'the'\n prop.remove(\"the\")\n if \"a\" in prop: # strip 'a'\n prop.remove(\"a\")\n if \"an\" in prop: # strip 'an'\n prop.remove(\"an\")\n except ValueError: # 'AUX' not in list\n pass\n\n # Perhaps there is an 'of' in the entity, such as in 'Lord of the Rings'\n else:\n try:\n prop = lemmas[pos.index('AUX') + 1:pos.index('PROPN')]\n if len(prop) > 1:\n if \"the\" in prop: # strip 'the'\n prop.remove(\"the\")\n if \"a\" in prop: # strip 'a'\n prop.remove(\"a\")\n if \"an\" in prop: # strip 'an'\n prop.remove(\"an\")\n if \"of\" in prop: # strip 'of'\n prop.remove(\"of\")\n except:\n pass\n\n ent = sent.split(\" \")[pos.index('ADP') + 1:] # assuming it always ends with '?'\n\n elif question_type == \"verb_prop\":\n # Find entity: direct object (phrase!)\n for word in parse:\n if word.dep_ == 'dobj':\n ent = phrase(word)\n\n main_verb = parse[dep.index(\"ROOT\")]\n prop = [main_verb.lemma_]\n\n elif question_type == \"duration\":\n prop = {'P2047': 'duration'}\n # Find entity: probably follows main verb (ROOT)\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0:\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0] + 1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0] + 1].text.split(\" \")\n else:\n ent = sent.split(\" \")[dep.index(\"ROOT\") + 1:]\n\n elif question_type == \"passive\":\n for word in parse:\n if word.dep_ == 'pobj':\n ent = phrase(word)\n\n # Find prop: probably follows main verb (ROOT)\n for word in parse:\n if word.pos_ == \"VERB\" and word.dep_ == \"ROOT\":\n prop = [word.text]\n elif question_type == \"location\":\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0:\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0] + 1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0] + 1].text.split(\" \")\n else:\n ent = sent.split(\" \")[dep.index(\"ROOT\") + 1:]\n if 'from' in lemmas:\n prop = {'P19': 'place of birth', 
'P495': 'country of origin'}\n elif 'filmed' in sent:\n prop = {'P915': 'filming location'}\n elif 'born' in lemmas:\n prop = {'P19': 'place of birth'}\n\n elif question_type == \"time\":\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0:\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0] + 1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0] + 1].text.split(\" \")\n else:\n ent = sent.split(\" \")[dep.index(\"ROOT\") + 1:]\n if 'born' in sent or 'birthday' in sent:\n prop = {'P569': 'date of birth'}\n elif 'release' in lemmas or 'come out' in sent or 'premiere' in sent \\\n or 'publish' in lemmas or 'publicise' in lemmas:\n prop = {'P577': 'publication date'}\n elif 'die' in sent or 'pass away' in sent:\n prop = {'P570': 'date of death'}\n # Filter property answers based on data type of answer using VALUES\n pass\n elif question_type == \"what_A_is_X_Y\":\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0:\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0] + 1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0] + 1].text.split(\" \")\n else:\n ent = sent.split(\" \")[dep.index(\"ROOT\") + 1:]\n if parse[-2:].text.split(\" \") == ['influenced', 'by'] or 'earned' in sent:\n prop = parse[-2:].text.split(\" \")\n elif \"AUX\" in pos:\n prop = parse[1:pos.index(\"AUX\")].text.split(\" \")\n elif question_type == \"what_which_verb\":\n # Find entity\n # Find property: probably second word (parse[1])\n prop = [lemmas[1]]\n elif question_type == \"whatXisY\":\n # Find entity\n # Find property: probably words between first word and POS:AUX\n prop = parse[1:pos.index(\"AUX\")].text.split(\" \")\n elif question_type == \"about\":\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0:\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0] + 1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0] + 1].text.split(\" \")\n # Find entity\n prop = [\"main\", \"subject\"]\n elif question_type == \"what_is_Xs_Y\":\n # Find entity: Either between POS:AUX (lemmas[2]) and 's, or:\n # istitle()\n if \"'s\" in lemmas:\n prop = lemmas[2:lemmas.index(\"'s\")]\n else:\n prop = []\n for word in lemmas:\n if word.istitle():\n prop.append(word)\n # Find property: probably last two words of sentence (parse[-4:-2])\n prop = parse[-2:].text.split(\" \")\n\n elif question_type == \"count\":\n entity_quotes = []\n entity_istitle = []\n for word in parse:\n if word.text == '\"':\n entity_quotes.append((word.i, word.text))\n if word.text.istitle() and word.i != 0 and word.text != \"Academy\" and word.text != \"Awards\" and word.text != \"Award\":\n entity_istitle.append((word.i, word.text))\n if len(entity_quotes) > 1:\n ent = parse[entity_quotes[0][0]+1:entity_quotes[-1][0]].text.split(\" \")\n elif entity_istitle:\n ent = parse[entity_istitle[0][0]:entity_istitle[-1][0]+1].text.split(\" \")\n else:\n 
ent = sent.split(\" \")[dep.index(\"ROOT\") + 1:]\n if \"AUX\" in pos:\n prop = parse[2:pos.index(\"AUX\")].text.split(\" \")\n prop = list(set(prop) - set(ent))\n elif question_type == \"yes/no\":\n main_verb_id = dep.index(\"ROOT\")\n ent = lemmas[1:main_verb_id]\n prop_broad = lemmas[main_verb_id + 1:]\n prop = [w for w in prop_broad if pos[lemmas.index(w)] != \"DET\"]\n\n else:\n pass\n\n # Filter entity: starts with first capital letter and start is not an adjective (e.g. the Dutch movie ...)\n try:\n start = min([ent.index(word) for word in ent if word.istitle() and\n pos[sent.split(\" \").index(word)] != \"ADJ\"])\n ent = ent[start:]\n except ValueError:\n pass\n\n # Convert entity and property from list to string\n ent = \" \".join(ent)\n if type(prop) == list:\n prop = \" \".join(prop)\n ent, prop = retrieve_id_label(ent, prop)\n else:\n ent, prop_irrelevant = retrieve_id_label(ent, \"\")\n\n return ent, prop", "def agent_from_entity(self, relation, entity_id):\n # Extract sentence tags mapping ids to the text. We refer to this\n # mapping only if the entity doesn't appear in the grounded entity\n # list\n tags = _extract_sentence_tags(relation.tagged_sentence)\n\n if entity_id is None:\n return None\n self.num_entities += 1\n\n entity_id = _extract_id(entity_id)\n\n if entity_id not in relation.entities and \\\n entity_id not in tags:\n # Could not find the entity in either the list of grounded\n # entities of the items tagged in the sentence. Happens for\n # a very small percentage of the dataset.\n self.num_entities_not_found += 1\n return None\n\n if entity_id not in relation.entities:\n # The entity is not in the grounded entity list\n # Instead, make an ungrounded entity, with TEXT corresponding to\n # the words with the given entity id tagged in the sentence.\n entity_data = tags[entity_id]\n db_refs = {'TEXT': entity_data['text']}\n ag = Agent(normalize_medscan_name(db_refs['TEXT']),\n db_refs=db_refs)\n return ag, entity_data['bounds']\n else:\n entity = relation.entities[entity_id]\n bounds = (entity.ch_start, entity.ch_end)\n\n prop = entity.properties\n if len(prop.keys()) == 2 and 'Protein' in prop \\\n and 'Mutation' in prop:\n # Handle the special case where the entity is a protein\n # with a mutation or modification, with those details\n # described in the entity properties\n protein = prop['Protein']\n assert(len(protein) == 1)\n protein = protein[0]\n\n mutation = prop['Mutation']\n assert(len(mutation) == 1)\n mutation = mutation[0]\n\n db_refs, db_name = _urn_to_db_refs(protein.urn)\n\n if db_refs is None:\n return None\n db_refs['TEXT'] = protein.name\n\n if db_name is None:\n agent_name = db_refs['TEXT']\n else:\n agent_name = db_name\n\n # Check mutation.type. 
Only some types correspond to situations\n # that can be represented in INDRA; return None if we cannot\n # map to an INDRA statement (which will block processing of\n # the statement in process_relation).\n if mutation.type == 'AASite':\n # Do not handle this\n # Example:\n # MedscanEntity(name='D1', urn='urn:agi-aa:D1',\n # type='AASite', properties=None)\n return None\n elif mutation.type == 'Mutation':\n # Convert mutation properties to an INDRA MutCondition\n r_old, pos, r_new = _parse_mut_string(mutation.name)\n if r_old is None:\n logger.warning('Could not parse mutation string: ' +\n mutation.name)\n # Don't create an agent\n return None\n else:\n try:\n cond = MutCondition(pos, r_old, r_new)\n ag = Agent(normalize_medscan_name(agent_name),\n db_refs=db_refs, mutations=[cond])\n return ag, bounds\n except BaseException:\n logger.warning('Could not parse mutation ' +\n 'string: ' + mutation.name)\n return None\n elif mutation.type == 'MethSite':\n # Convert methylation site information to an INDRA\n # ModCondition\n res, pos = _parse_mod_string(mutation.name)\n if res is None:\n return None\n cond = ModCondition('methylation', res, pos)\n ag = Agent(normalize_medscan_name(agent_name),\n db_refs=db_refs, mods=[cond])\n return ag, bounds\n\n # Example:\n # MedscanEntity(name='R457',\n # urn='urn:agi-s-llid:R457-2185', type='MethSite',\n # properties=None)\n elif mutation.type == 'PhosphoSite':\n # Convert phosphorylation site information to an INDRA\n # ModCondition\n res, pos = _parse_mod_string(mutation.name)\n if res is None:\n return None\n cond = ModCondition('phosphorylation', res, pos)\n ag = Agent(normalize_medscan_name(agent_name),\n db_refs=db_refs, mods=[cond])\n return ag, bounds\n\n # Example:\n # MedscanEntity(name='S455',\n # urn='urn:agi-s-llid:S455-47', type='PhosphoSite',\n # properties=None)\n pass\n elif mutation.type == 'Lysine':\n # Ambiguous whether this is a methylation or\n # demethylation; skip\n\n # Example:\n # MedscanEntity(name='K150',\n # urn='urn:agi-s-llid:K150-5624', type='Lysine',\n # properties=None)\n return None\n else:\n logger.warning('Processor currently cannot process ' +\n 'mutations of type ' + mutation.type)\n else:\n # Handle the more common case where we just ground the entity\n # without mutation or modification information\n db_refs, db_name = _urn_to_db_refs(entity.urn)\n if db_refs is None:\n return None\n db_refs['TEXT'] = entity.name\n\n if db_name is None:\n agent_name = db_refs['TEXT']\n else:\n agent_name = db_name\n\n ag = Agent(normalize_medscan_name(agent_name),\n db_refs=db_refs)\n return ag, bounds", "def get_syntactic_parent(self, element):\n if \"constituent\" in element:\n element = element[\"constituent\"]\n\n return GraphWrapper.get_in_neighbour_by_relation_type(self.graph, element, self.syntactic_edge_type)", "def getNotifyContext(self):\n return self.context", "def before_get_relationship(self, relationship_field, related_type_, related_id_field, view_kwargs):\n raise NotImplementedError", "def get_previous_sibling(self):\n previous = super(BaseLesson, self).get_previous_sibling()\n try:\n if previous.course_id == self.course_id:\n return previous\n else:\n return None\n except:\n return None", "def _previous_level(self, cr, uid, policy_level, context=None):\n previous_level_ids = self.search(\n cr,\n uid,\n [('policy_id', '=', policy_level.policy_id.id),\n ('level', '<', policy_level.level)],\n order='level desc',\n limit=1,\n context=context)\n return previous_level_ids[0] if previous_level_ids else None", "def 
_context_of_message(message):\r\n context_node = message.parentNode\r\n context_name_element = _getElementByTagName(context_node, \"name\")\r\n if context_name_element.firstChild:\r\n if context_name_element.firstChild.nodeValue:\r\n context_name = escape_context(\r\n [context_name_element.firstChild.nodeValue])\r\n else:\r\n context_name = []\r\n else:\r\n context_name = []\r\n try:\r\n c_node = _getElementByTagName(message, \"comment\")\r\n comment_text = _getText(c_node.childNodes)\r\n if comment_text:\r\n comment = escape_context([comment_text])\r\n else:\r\n comment = []\r\n except LinguistParseError, e:\r\n comment = []\r\n return (context_name + comment) or \"None\"", "def condition_context(self) -> 'outputs.ConditionContextResponse':\n return pulumi.get(self, \"condition_context\")", "def get_context():\n return _model", "def pre_get_entity_reconciliation_job(\n self,\n request: service.GetEntityReconciliationJobRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[service.GetEntityReconciliationJobRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def user32_GetStateInteractionContext(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"interactionContext\", \"pointerInfo\", \"state\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Traverses a path to find files matching the specified glob patterns.
def GRRFind(path, patterns):
    for directory, sub_directories, files in os.walk(path):
        for pattern in patterns:
            directory_pattern = os.path.join(directory, pattern)
            for pattern_match in glob.iglob(directory_pattern):
                if os.path.isfile(pattern_match):
                    yield pattern_match
[ "def _find_paths(dir_path, file_pattern):\n pattern = os.path.join(dir_path, \"**\", file_pattern)\n return glob.glob(pattern, recursive=True)", "def all_files(pattern, search_path, pathsep=os.pathsep):\r\n for path in search_path.split(pathsep):\r\n for match in glob.glob(os.path.join(path, pattern)):\r\n yield match", "def recursiveglob(root,pattern):\n matches = []\n for root, dirnames, filenames in os.walk(root):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n return matches", "def scan_paths(root_dir, pattern):\n\n root_dir = os.path.abspath(root_dir)\n\n pattern = re.compile(pattern)\n\n for root, dirs, files in scandir.walk(root_dir, followlinks=True):\n for name in files:\n\n # Match the extension.\n if pattern.search(name):\n yield os.path.join(root, name)", "def glob_for(path, extension):\n\n ext_glob = '*.{}'.format(extension)\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, ext_glob):\n yield os.path.join(root, filename)", "def glob(pattern, path=None, hidden=False, dirs=False):\n\n if path is None:\n with current_context() as ctx:\n path = ctx.get('paths.input')\n paths = glob2(join_path(path, pattern), include_hidden=hidden)\n if not dirs:\n paths = [v for v in paths if not os.path.isdir(v)]\n return paths", "def recfind(sdir: str, pattern: str) -> List[str]:\n file_paths = []\n\n for root, dir_names, file_names in os.walk(sdir):\n for file_name in file_names:\n if re.match(pattern, file_name):\n file_path = os.path.join(root, file_name)\n file_paths.append(file_path)\n else:\n continue\n\n return file_paths", "def files(folderpath, pattern=\"*\"):\n return [f for f in folderpath.glob(pattern) if f.is_file()]", "def find_source_files(self, path, formats):\n self.source_files = list()\n for root, directories, filenames in os.walk(path):\n for filename in filenames:\n if any([re.search(r\".{f}$\".format(f=f), filename) for f in formats]):\n self.source_files.append(os.path.relpath(os.path.join(root, filename), path))\n random.shuffle(self.source_files)\n self.logger.debug(\"Found %d files in directory %s and it's subdirectories\" % (self.get_files_count(), path))\n return", "def find_files_like(datapath, pattern):\n # No need to import these at module level\n from os import listdir\n import re\n\n # Traverse file list and look for `pattern`\n filenames = []\n pattern = re.compile(pattern)\n for file in listdir(datapath):\n if pattern.search(file):\n filenames.append(file)\n\n return filenames", "def file_scanning(path, file_format=r\".txt$\", full_path=True, sub_scan=False):\n if os.path.exists(path):\n file_paths = []\n for root, dirs, files in os.walk(path, topdown=True):\n paths = [file for file in files if re.search(file_format, file)]\n if full_path:\n paths = [os.path.join(root, file) for file in paths]\n file_paths.extend(paths)\n if not sub_scan:\n break\n if not file_paths:\n print(\"File with specified format not find\")\n return []\n else:\n print(\"Invalid path!\")\n return []\n return file_paths", "def reglob(path, regex):\n return [file for file in os.listdir(path) if re.match(regex, file)]", "def get_files(dir_path, pattern=\"\"):\n if os.path.isdir(dir_path):\n archives = []\n for dirpath, dirnames, filenames in os.walk(dir_path):\n for filename in filenames:\n if re.search(pattern, filename):\n archives.append(os.path.join(dirpath, filename))\n return archives\n else:\n raise FileUtilsError(dirErrorMsg + dir_path)", "def find(sdir: str, pattern: str) -> 
List[str]:\n file_paths = []\n\n for item in os.listdir(sdir):\n path = os.path.join(sdir, item)\n if os.path.isfile(path) and re.match(pattern, item) is not None:\n file_paths.append(path)\n else:\n continue\n\n return file_paths", "def find_files(base, pattern):\n return [n for n in fnmatch.filter(os.listdir(base), pattern) if\n os.path.isfile(os.path.join(base, n))]", "def find_files(base, pattern):\n return [n for n in fnmatch.filter(os.listdir(base), pattern) if os.path.isfile(os.path.join(base, n))]", "def find_files(pattern, base='.'):\n regex = re.compile(pattern) # 为了效率而编译了它\n matches = list()\n for root, dirs, files in os.walk(base):\n for f in files:\n if regex.match(f):\n matches.append(path.join(root, f))\n return matches", "def FindMatchingFiles(pattern):\n path, _ = os.path.split(pattern)\n if path == \"\":\n path = \".\" # os.listdir fails with empty path\n def match(s): return s.startswith(pattern) and s.endswith(\".h5\")\n return list(filter(match, os.listdir(path)))", "def read_all_files_directory(self, path):\n check = Apios.check_files_in_directory(self, path)\n if check:\n src = path + \"*\"\n files = glob.iglob(src)\n for name in files:\n try:\n with open(name) as f:\n sys.stdout.write(f.read())\n except IOError:\n print_exc()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the package path prefix from the package name.
def GRRGetPackagePrefix(package_name):
    package_components = package_name.split(".")

    if len(package_components) > 2:
        package_path_prefix = os.path.join(*package_components[1:])
    elif len(package_components) == 2:
        package_path_prefix = package_components[1]
    else:
        package_path_prefix = ""

    return package_path_prefix
[ "def GRRGetRelativeFilename(package_path_prefix, filename):\n if package_path_prefix:\n filename = os.path.relpath(filename, package_path_prefix)\n\n return filename", "def _get_package_name(self, path, root_path):\n\n return path_utils.get_package_name(path, root_path)", "def package_name(self, name: str) -> str:\n\n if name in self.package_aliases:\n return self.package_aliases[name]\n\n if not name:\n return name\n\n return \".\".join(\n self.package_aliases.get(part) or self._package_name(part)\n for part in name.split(\".\")\n )", "def get_real_prefix():\n return getattr(sys, 'real_prefix', sys.prefix)", "def _get_prefix_path(self, prefix: str) -> str:\n prefix_in_default_location = os.path.abspath(f'{prefix_dir}/{prefix}')\n if os.path.exists(prefix_in_default_location):\n print(\n f'Interpreting \"{prefix}\" as prefix-name '\n f'(points to \"{prefix_in_default_location}\")')\n return prefix_in_default_location\n else:\n abs_prefix = os.path.abspath(prefix)\n print(\n f'Interpreting \"{prefix}\" as path '\n f'(points to \"{abs_prefix}\")')\n return abs_prefix", "def _package_rootdir(name):\n initfile = importlib.util.find_spec(name).origin\n return os.path.split(os.path.split(initfile)[0])[0]", "def _package_fullname_to_path(fullname):\n return fullname.replace(\".\", os.sep) + os.sep", "def GRRGetPackagePath(package_path_prefix, sub_path):\n if package_path_prefix and sub_path:\n package_path = os.path.join(package_path_prefix, sub_path)\n elif sub_path:\n package_path = sub_path\n else:\n package_path = package_path_prefix\n\n return package_path", "def _get_root_package(self, component_name):\n\n return path_utils.get_main_package_name(component_name)", "def get_name() -> str:\n package_name = os.path.basename(PACKAGE_DIR)\n return package_name", "def get_name(install_path):\n package_name = install_path.split('/')\n return(package_name[-2])", "def thrift_filePackageName(self):\n\n filename = os.path.basename(self.thrift_file)\n try:\n dotPos = filename.index(\".\")\n return filename[0:dotPos]\n except ValueError:\n # no '.' in the name. return the whole basename\n return filename", "def pkgname(nevra):\n return nevra.rsplit('-', 2)[0]", "def prefix_for_table(self, table_name):\n return posixpath.join(str(self._base_dir), table_name)", "def import_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"import_prefix\")", "def install_prefix(self):\n return '/usr/local' if sys.prefix == '/usr' and self.on_debian else sys.prefix", "def get_prefix_from_dir(input_dir):\n\tfolder_name = input_dir.split('\\\\')[-1]\n\tprefix = ''\n\tfor token in folder_name.lower().split():\n\t\tprefix = prefix + token + '_'\n\n\treturn prefix + 'bochk_'", "def pkgname_filter(pkgname):\n if re.search('^py\\d{2}-', pkgname):\n # Strip Python version from pkgname, as it's present in the binary package name,\n # but is not present in the pkgsrc package name.\n return 'py-' + pkgname[5:]\n return pkgname", "def get_component_package_name(self):\n\n match = re.search('.+?(?=-[0-9])', self._get_version_metadata()['packageName'])\n\n return match.group(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the package path from the package path prefix and sub path.
def GRRGetPackagePath(package_path_prefix, sub_path):
    if package_path_prefix and sub_path:
        package_path = os.path.join(package_path_prefix, sub_path)
    elif sub_path:
        package_path = sub_path
    else:
        package_path = package_path_prefix

    return package_path
[ "def GRRGetPackagePrefix(package_name):\n package_components = package_name.split(\".\")\n\n if len(package_components) > 2:\n package_path_prefix = os.path.join(package_components[1:])\n elif len(package_components) == 2:\n package_path_prefix = package_components[1]\n else:\n package_path_prefix = \"\"\n\n return package_path_prefix", "def _package_fullname_to_path(fullname):\n return fullname.replace(\".\", os.sep) + os.sep", "def dfab_package_path():\n return os.path.join( __path__[0], \"../..\")", "def lib_path(self) -> str:\n return \"@{}//:pkg\".format(self.archive_name)", "def import_path(self):\n return os.path.join(self.remote_root, self.pkg) if self.pkg else self.remote_root", "def package_sub_path_from_relative_path(root, containing_file_path, relative_path):\n containing_path, _ = os.path.split(containing_file_path)\n return strip_prefixes(os.path.abspath(os.path.join(containing_path, remove_protocol(relative_path))), root)", "def get_installed_path(package: str) -> str:\n # if the package name is not the same as module name, module name should be\n # inferred. For example, mmcv-full is the package name, but mmcv is module\n # name. If we want to get the installed path of mmcv-full, we should concat\n # the pkg.location and module name\n pkg = get_distribution(package)\n possible_path = osp.join(pkg.location, package)\n if osp.exists(possible_path):\n return possible_path\n else:\n return osp.join(pkg.location, package2module(package))", "def parse_package_string(path):\n parts = path.split('.')\n\n # Is the last entry in the path capitalized?\n if parts[-1][0].isupper():\n return \".\".join(parts[:-1]), parts[-1]\n\n return path, \"\"", "def GRRGetRelativeFilename(package_path_prefix, filename):\n if package_path_prefix:\n filename = os.path.relpath(filename, package_path_prefix)\n\n return filename", "def remote_package_path(cls, remote_root, import_path):\n return cls.package_path(remote_root, import_path)", "def resolvePath(path):\n global prefix\n if os.path.isabs(path):\n return path\n return os.path.abspath(os.path.join(prefix, path))", "def _get_prefix_path(self, prefix: str) -> str:\n prefix_in_default_location = os.path.abspath(f'{prefix_dir}/{prefix}')\n if os.path.exists(prefix_in_default_location):\n print(\n f'Interpreting \"{prefix}\" as prefix-name '\n f'(points to \"{prefix_in_default_location}\")')\n return prefix_in_default_location\n else:\n abs_prefix = os.path.abspath(prefix)\n print(\n f'Interpreting \"{prefix}\" as path '\n f'(points to \"{abs_prefix}\")')\n return abs_prefix", "def _package_rootdir(name):\n initfile = importlib.util.find_spec(name).origin\n return os.path.split(os.path.split(initfile)[0])[0]", "def _path_for_main_spec(path):\n return path.replace(os.path.sep, \"/\")", "def _get_package_name(self, path, root_path):\n\n return path_utils.get_package_name(path, root_path)", "def _resolve_mod_path(self, curr_dir, from_mod):\n ndots = _num_leading_dots(from_mod)\n arr = from_mod[ndots:].split(\".\")\n if ndots != 0:\n prev = [\"..\"] * (ndots - 1)\n return os.path.abspath(os.path.join(curr_dir, *prev, normalize_path(\"/\".join(arr))))\n if arr[0] in self._pkg2modpath:\n return os.path.abspath(\n os.path.join(self._pkg2modpath[arr[0]], normalize_path(\"/\".join(arr[1:]))))\n return os.path.abspath(os.path.join(curr_dir, normalize_path(\"/\".join(arr))))", "def make_package_path(file_path, roots):\n\n # Prevents greedily matching against a shallow path when a deeper, better\n # matching path exists.\n roots.sort(key=len, reverse=True)\n\n 
for next_root in roots:\n if not next_root.endswith(os.sep):\n next_root += os.sep\n\n if file_path.startswith(next_root):\n relative_path = file_path[len(next_root):]\n return relative_path\n\n return file_path", "def getPackagePath(name,var=None,check=True,force=False):\n p = _externalPackages[name]\n\n # if checking enabled and current python interpreter version is high enough\n # then return empty string (package is not required)\n if not force and p.has_key('maxHexVersion'):\n if sys.hexversion >= int(p['maxHexVersion'],16):\n return \"\"\n\n if p.has_key('noarch') and p['noarch']:\n platf = 'noarch'\n else:\n platf = getPlatformString()\n \n if platf == 'NOPLATF':\n return ''\n\n prefix_path = [getExternalDir(),name,p['version'],platf]\n \n def transform(s):\n # get the distribution id\n try:\n distver = platf.split('-')[1]\n except IndexError:\n distver = platf\n\n # end look up the fixed python version in the pyver table if needed\n try:\n pyver = pyver_table[distver]\n except KeyError:\n pyver = sys.version[:3]\n return s%{\"PYVER\":pyver}\n\n if var:\n paths = [transform(os.path.join(*(prefix_path+[p]))) for p in p['vars'][var].split(':') if p]\n else:\n paths = [os.path.join(*prefix_path)]\n\n for p in paths:\n if not os.path.exists(p):\n from diane import getLogger\n logger = getLogger('setup')\n logger.debug('path %s does not exist (setting %s for %s)',p,var,name)\n\n return ':'.join(paths)", "def find_plugin_package_subpaths(plugin_source_root, subpath, package_name=None):\r\n result = []\r\n \r\n def add(package_name):\r\n package_dir = os.path.join(plugin_source_root, package_name)\r\n \r\n if not os.path.exists(package_dir):\r\n raise ValueError(\"Invalid plug-in package name: '%s'\" % package_name)\r\n \r\n path = os.path.normpath(os.path.join(package_dir, subpath))\r\n if os.path.exists(path):\r\n result.append((package_name, path))\r\n \r\n add('common')\r\n if package_name not in (None, '', 'common'):\r\n add(package_name)\r\n \r\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the filename relative to the package path prefix.
def GRRGetRelativeFilename(package_path_prefix, filename):
    if package_path_prefix:
        filename = os.path.relpath(filename, package_path_prefix)

    return filename
[ "def GRRGetPackagePrefix(package_name):\n package_components = package_name.split(\".\")\n\n if len(package_components) > 2:\n package_path_prefix = os.path.join(package_components[1:])\n elif len(package_components) == 2:\n package_path_prefix = package_components[1]\n else:\n package_path_prefix = \"\"\n\n return package_path_prefix", "def thrift_filePackageName(self):\n\n filename = os.path.basename(self.thrift_file)\n try:\n dotPos = filename.index(\".\")\n return filename[0:dotPos]\n except ValueError:\n # no '.' in the name. return the whole basename\n return filename", "def _get_prefix_path(self, prefix: str) -> str:\n prefix_in_default_location = os.path.abspath(f'{prefix_dir}/{prefix}')\n if os.path.exists(prefix_in_default_location):\n print(\n f'Interpreting \"{prefix}\" as prefix-name '\n f'(points to \"{prefix_in_default_location}\")')\n return prefix_in_default_location\n else:\n abs_prefix = os.path.abspath(prefix)\n print(\n f'Interpreting \"{prefix}\" as path '\n f'(points to \"{abs_prefix}\")')\n return abs_prefix", "def package_path(filename, quoted = 1):\n name = os.path.join(os.path.dirname(__file__),filename)\n if quoted:\n return \"\\\"\" + name + \"\\\"\"\n return name", "def _package_fullname_to_path(fullname):\n return fullname.replace(\".\", os.sep) + os.sep", "def get_name() -> str:\n package_name = os.path.basename(PACKAGE_DIR)\n return package_name", "def pathName(self, filename: str) -> str:\n x = self\n theDir = x.baseDirName()\n return g.finalize_join(theDir, filename) if theDir else ''", "def _file2name(self, filename):\n rel_filename = re.sub('^{0}/'.format(self._content_root()),\n '', filename)\n fullname = os.path.splitext(rel_filename)[0]\n return fullname", "def get_relative_pathname(self):\n return os.path.join(Syllabus.SYLLABUS_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def get_filename(self, migration):\n return os.path.join(self.directory, '{}{}'.format(migration, self.ext))", "def _get_filename(self) -> \"std::string\" :\n return _core.SATImportOptions__get_filename(self)", "def get_package_filename(pkg):\n return '%s-%s-%s.%s.rpm' % (pkg.name, pkg.version, pkg.release, pkg.arch)", "def _get_filename(self) -> \"std::string\" :\n return _core.ImportOptions__get_filename(self)", "def filename_core (apath):\n if (apath is None): # sanity check\n return ''\n return os.path.basename(os.path.splitext(apath)[0])", "def workflow_filename():\n stacks = inspect.stack()\n frame = inspect.stack()[len(stacks) - 1]\n full_path = frame[0].f_code.co_filename\n filename, _ = os.path.splitext(os.path.basename(full_path))\n filename = argo_safe_name(filename)\n return filename", "def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())", "def _modulenamemangle(self, modfilename):\n if not self.source:\n return modfilename\n return os.path.splitext(os.path.basename(modfilename))[0]", "def get_full_path(self) -> str:\r\n return self.location + \"\\\\\" + self.filename + \".\" + self.ext", "def _get_package_name(self, path, root_path):\n\n return path_utils.get_package_name(path, root_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find data files as defined by the specifications.
def GRRFindDataFiles(data_files_specs):
    data_files = {}

    for package_name, sub_paths, patterns in data_files_specs:
        package_path_prefix = GRRGetPackagePrefix(package_name)
        package_data_files = []

        for sub_path in sub_paths:
            package_path = GRRGetPackagePath(package_path_prefix, sub_path)
            for filename in GRRFind(package_path, patterns):
                package_data_files.append(filename)

        data_files.setdefault(package_name, [])
        for filename in package_data_files:
            filename = GRRGetRelativeFilename(package_path_prefix, filename)
            data_files[package_name].append(filename)

    return data_files
[ "def find_data_files():\n\n if \"freebsd\" in sys.platform:\n manpagebase = pjoin('man', 'man1')\n else:\n manpagebase = pjoin('share', 'man', 'man1')\n\n # Simple file lists can be made by hand\n manpages = [f for f in glob(pjoin('docs','man','*.1.gz')) if isfile(f)]\n if not manpages:\n # When running from a source tree, the manpages aren't gzipped\n manpages = [f for f in glob(pjoin('docs','man','*.1')) if isfile(f)]\n\n # And assemble the entire output list\n data_files = [ (manpagebase, manpages) ]\n\n return data_files", "def data_files(self):\n #tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)\n #tf_record_pattern = os.path.join(FLAGS.data_dir, 'test_*')\n tf_record_pattern = FLAGS.data_dir\n #tf_record_pattern = os.path.join(FLAGS.data_dir, FLAGS.ImageSet_basename + '*')\n print(\"tf_record_pattern:\")\n print(tf_record_pattern)\n data_files = tf.gfile.Glob(tf_record_pattern)\n print(data_files)\n if not data_files:\n print('No files found for dataset %s/%s at %s' % (self.name,\n self.subset,\n FLAGS.data_dir))\n\n self.download_message()\n exit(-1)\n return data_files", "def get_data_files ():\n installpath = os.path.join (\"share\", \"ocempgui\")\n path = \"data\"\n dirs = get_directory_list (path)\n filedict = {}\n for path in dirs:\n files = glob.glob (os.path.join (path, \"*.*\"))\n if files:\n filedict[path] = files\n return get_installation_files (\"data\", installpath, filedict)", "def datafiles(self):\n for r in self._source_doc['Resources'].find(['root.datafile', 'root.suplimentarydata', 'root.datadictionary']):\n yield r", "def data_files(globname):\n here = os.path.dirname(__file__)\n data_dir = os.path.join(here, \"data\")\n matches = [permfile for permfile in glob.glob(os.path.join(data_dir, globname))]\n return matches", "def get_data_files (self):\r\n data = []\r\n if not self.packages:\r\n return data\r\n for package in self.packages:\r\n # Locate package source directory\r\n src_dir = self.get_package_dir(package)\r\n\r\n # Compute package build directory\r\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\r\n\r\n # Length of path to strip from found files\r\n plen = 0\r\n if src_dir:\r\n plen = len(src_dir)+1\r\n\r\n # Strip directory from globbed filenames\r\n filenames = [\r\n file[plen:] for file in self.find_data_files(package, src_dir)\r\n ]\r\n data.append((package, src_dir, build_dir, filenames))\r\n return data", "def find_output_files(self):\n # find the base path to the files\n if self.input_uri.startswith(\"file\"):\n test_path = self.input_uri.split(\":\")[-1]\n if os.path.isdir(test_path):\n base_path = os.path.abspath(test_path)\n elif os.path.isdir(os.path.join(os.getcwd(), test_path)):\n base_path = os.path.join(os.getcwd(), test_path)\n else:\n raise ValueError(f\"output path {test_path} does not exist\")\n search_fits = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.fits\"\n # trailer files\n search_tra = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.tra\"\n # env file\n search_env = f\"{base_path}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n else:\n base_path = os.getcwd()\n subfolder = os.path.join(base_path, \"inputs\", self.ipppssoot)\n search_fits = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.fits\"\n search_tra = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.tra\"\n search_env = f\"{subfolder}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n self.divider(\"Finding output data for:\", repr(search_fits))\n files = glob.glob(search_fits)\n\n self.divider(\"Finding output trailers for:\", repr(search_tra))\n 
files.extend(glob.glob(search_tra))\n\n self.divider(\"Finding output cal env file for:\", repr(search_env))\n files.extend(glob.glob(search_env))\n\n return list(sorted(files))", "def findDataFile(self):\n modPath = os.path.abspath(sys.path[0])\n if modPath.endswith('.zip'): # for py2exe\n modPath = os.path.dirname(modPath)\n pathList = [dataFilePath, os.path.join(modPath, '../data/'), modPath]\n fileList = ['units.dat']\n if lang and lang != 'C':\n fileList[0:0] = ['units_%s.dat' % lang, 'units_%s.dat' % lang[:2]]\n for path in filter(None, pathList):\n for fileName in fileList:\n try:\n f = codecs.open(os.path.join(path, fileName), 'r', 'utf-8')\n lineList = f.readlines()\n f.close()\n return lineList\n except IOError:\n pass\n raise UnitDataError, _('Can not read \"units.dat\" file')", "def data_files():\r\n data_files = []\r\n path = get_data_path(media=\"media\")\r\n for f in findall(path):\r\n data_files.append(('media/models_media', [f]))\r\n return data_files", "def file_search(self):\n self.file_dir = Path('../../data/banbury_data_extractor/' + self.tissue)\n search_term = f\"*eye_{self.animal}_*\" # data exist as eye_[animal_number]_[tissue] and [tissue]_eye_[animal_number]\n for i, file in enumerate(sorted(Path(self.file_dir).glob(search_term))):\n self.file_dict.update({f\"animal_{self.animal}_{self.tissue}_{i}\": f\"{file}\"})\n return self.file_dir, self.file_dict", "def get_all_data_files():\n all_files = []\n\n for dirname, _, filenames in os.walk(DATA_FILES_PATH):\n for filename in filenames:\n\n # ignore anything that is not a .mat file\n if MAT in filename:\n # Example: complete_path_to_file = /create-spectrograms/data/eeg_record1.mat\n complete_path_to_file = os.path.join(dirname, filename)\n all_files.append(complete_path_to_file)\n\n return all_files", "def find_input_files(self):\n test_path = self.input_uri.split(\":\")[-1]\n if os.path.isdir(test_path):\n base_path = os.path.abspath(test_path)\n elif os.path.isdir(os.path.join(os.getcwd(), test_path)):\n base_path = os.path.join(os.getcwd(), test_path)\n else:\n raise ValueError(f\"input path {test_path} does not exist\")\n # check for tarred inputs\n cwd = os.getcwd()\n search_tar = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.tar.gz\"\n tar_files = glob.glob(search_tar)\n with sysexit.exit_on_exception(exit_codes.INPUT_TAR_FILE_ERROR, \"Failed extracting inputs from\", tar_files):\n if len(tar_files) == 0:\n raise RuntimeError(f\"No input tar files for: {repr(search_tar)}\")\n elif len(tar_files) == 1:\n log.info(\"Extracting inputs from: \", tar_files)\n os.chdir(base_path)\n with tarfile.open(tar_files[0], \"r:gz\") as tar_ref:\n tar_ref.extractall()\n else:\n raise RuntimeError(f\"Too many tar files for: {repr(search_tar)} = {tar_files}\")\n os.chdir(cwd)\n # get input files\n search_str = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.fits\"\n self.divider(\"Finding input data using:\", repr(search_str))\n # find the base path to the files\n files = glob.glob(search_str)\n return list(sorted(files))", "def find_metadata_filenames(metadata, PROJECT_ROOT_DIR, IMAGING_DATES):\n \n if not IMAGING_DATES:\n IMAGING_DATES = sorted(metadata['date_recording_yyyymmdd'].astype(str).unique())\n \n # Convert list of camera-channel-hydra triplets to a dictionary with \n # hydra-channel unique keys, and camera serial numbers as values\n HYCH2CAM_DICT = {}\n for line in CAM2CH_list:\n HYCH2CAM_DICT[(line[2], line[1])] = line[0] \n\n # PATHS TO RESULTS DIRECTORIES (User-defined, optional) \n maskedvideo_dir = 
os.path.join(PROJECT_ROOT_DIR, \"MaskedVideos\")\n featuresN_dir = os.path.join(PROJECT_ROOT_DIR, \"Results\")\n rawvideo_dir = os.path.join(PROJECT_ROOT_DIR, \"RawVideos\")\n \n n_filepaths = sum([isinstance(path, str) for path in metadata.filename])\n n_entries = len(metadata.filename)\n print(\"%d/%d filename entries found in metadata\" % (n_filepaths, n_entries))\n \n if not n_entries == n_filepaths:\n print(\"Attempting to fetch filenames for %d entries...\" % (n_entries - n_filepaths)) \n \n # Return list of pathnames for masked videos in the data directory under given imaging dates\n maskedfilelist = []\n date_total = []\n print(\"Looking in '%s' for MaskedVideo files...\" % maskedvideo_dir)\n for i, expDate in enumerate(IMAGING_DATES):\n tmplist = lookforfiles(os.path.join(maskedvideo_dir, expDate), \".*.hdf5$\")\n date_total.append(len(tmplist))\n maskedfilelist.extend(tmplist)\n print(\"%d masked video snippets found for imaging dates provided:\\n%s\" % \\\n (len(maskedfilelist), [*zip(IMAGING_DATES, date_total)])) \n \n \n #%% # Parse over metadata entries and use well number/run number/date/hydra rig \n # information to locate and fill in missing filename entries\n \n for i, filepath in enumerate(metadata.filename): \n if isinstance(filepath, str):\n # If filepath is already present, make sure there are no whitespaces\n metadata.loc[i,'filename'] = filepath.replace(\" \", \"\") \n \n else:\n file_info = metadata.iloc[i]\n \n # Extract date/run/hydra/plate/well info\n date = str(file_info['date_recording_yyyymmdd'].astype(int)) # which experiment date?\n hydra = file_info['instrument_name'] # which Hydra rig?\n well_number = str(file_info['well_number']) # which well in 96-well plate?\n run_number = str(int(file_info['run_number'])) # which run?\n \n # Obtain channel number from well-to-channel mapping dictionary: 'UPRIGHT_96WP'\n channel = UPRIGHT_96WP.iloc[np.where(UPRIGHT_96WP == well_number)].columns[0][0]\n \n # Obtain camera serial number unique ID using hydra/channel combination, using dictionary: HYCH2CAM_DICT\n cameraID = HYCH2CAM_DICT[(hydra,channel)] # which camera?\n \n # Update cameraID in metadata\n metadata.loc[i,'camera_number'] = cameraID\n \n # Use run/date/cameraID to construct regex query to find results filename \n # Query by regex using run/date/camera info\n file_querystr1 = '_run{0}_'.format(run_number)\n file_querystr2 = '_' + date + '_\\d{6}.' 
+ cameraID\n \n # Retrieve filepath, using data recorded in metadata\n for file in maskedfilelist:\n # If folder name contains '_runX_' (WARNING: this is manually assigned/typed when recording)\n if re.search(file_querystr1, file.lower()): # or re.search(file_querystr1, file.lower()):\n # If filepath contains: '_date_XXXXXX.cameraID'...\n # NB: auto-generated to include date/time(exact time not known)/cameraID\n if re.search(file_querystr2, file.lower()):\n # Record filepath to MaskedVideo file: '*/metadata.hdf5'\n metadata.loc[i,'filename'] = os.path.dirname(file)\n \n matches = sum([isinstance(path, str) for path in metadata.filename]) - n_filepaths\n print(\"Complete!\\n%d/%d filenames added.\\n\" % (matches, n_entries - n_filepaths))\n \n #%% OBTAIN RAW VIDEO FILEPATHS FOR COUNTING SNIPPETS\n \n # Return list of pathnames for raw videos in the data directory for given imaging dates\n rawvideolist = []\n date_total = []\n print(\"Looking in '%s' for RawVideo files...\" % rawvideo_dir)\n for i, expDate in enumerate(IMAGING_DATES):\n tmplist = lookforfiles(os.path.join(rawvideo_dir, expDate), \".*.mp4$\")\n date_total.append(len(tmplist))\n rawvideolist.extend(tmplist)\n \n # Get list of pathnames for featuresN files for given imaging dates\n featuresNlist = []\n print(\"Looking in '%s' for featuresN files...\" % featuresN_dir)\n for i, expDate in enumerate(IMAGING_DATES):\n tmplist = lookforfiles(os.path.join(featuresN_dir, str(expDate)), \".*_featuresN.hdf5$\")\n featuresNlist.extend(tmplist)\n \n # Pre-allocate columns in metadata for storing n_video_chunks, n_featuresN_files\n metadata['rawvideo_snippets'] = ''\n metadata['featuresN_exists'] = ''\n \n # Add n_video_snippets, n_featuresN_files as columns to metadata\n for i, masked_dirpath in enumerate(metadata.filename):\n # If filepath is present, return the filepaths to the rest of the chunks for that video\n if isinstance(masked_dirpath, str):\n # Record number of video segments (chunks) in metadata\n raw_dirpath = masked_dirpath.replace(\"/MaskedVideos\", \"/RawVideos\")\n snippetlist = [snippet for snippet in rawvideolist if raw_dirpath in snippet] \n n_snippets = len(snippetlist)\n metadata.loc[i, 'rawvideo_snippets'] = int(n_snippets)\n \n # Record the number of featuresN files\n featuresN_dirpath = masked_dirpath.replace(\"/MaskedVideos\", \"/Results\")\n featlist = [featpath for featpath in featuresNlist if featuresN_dirpath in featpath]\n n_featuresN = len(featlist)\n metadata.loc[i, 'featuresN_exists'] = (n_featuresN * n_snippets == n_snippets)\n \n print(\"(Metadata updated: Checked for featuresN files and tallied number of RawVideo snippets found.)\") \n \n#%% OPTIONAL EXTRAS\n \n # Ensure 'food_type' entries are grouped correctly by converting to uppercase\n metadata['food_type'] = metadata['food_type'].str.upper() \n \n # Calculate L1 diapause duration (if possible) and append to results\n diapause_required_columns = ['date_bleaching_yyyymmdd','time_bleaching',\\\n 'date_L1_refed_yyyymmdd','time_L1_refed_OP50']\n \n if all(x in metadata.columns for x in diapause_required_columns):\n # Extract bleaching dates and times\n bleaching_datetime = [datetime.datetime.strptime(date_str + ' ' +\\\n time_str, '%Y%m%d %H:%M:%S') for date_str, time_str\\\n in zip(metadata['date_bleaching_yyyymmdd'].astype(str),\\\n metadata['time_bleaching'])]\n # Extract dispensing dates and times\n dispense_L1_datetime = [datetime.datetime.strptime(date_str + ' ' +\\\n time_str, '%Y%m%d %H:%M:%S') for date_str, time_str\\\n in 
zip(metadata['date_L1_refed_yyyymmdd'].astype(str),\\\n metadata['time_L1_refed_OP50'])]\n # Estimate duration of L1 diapause\n L1_diapause_duration = [dispense - bleach for bleach, dispense in \\\n zip(bleaching_datetime, dispense_L1_datetime)]\n \n # Add duration of L1 diapause to metadata\n metadata['L1_diapause_seconds'] = [int(timedelta.total_seconds()) \\\n for timedelta in L1_diapause_duration]\n\n return metadata", "def get_files(data_path, file_type):\n p = Path(data_path).glob('**/*')\n files = [x for x in p if x.is_file() and str(x).endswith(file_type)]\n\n if len(files) == 0 or files is None:\n print(f\"No {file_type} files found in location {data_path}\\n\")\n return\n\n # print(f\"Found {len(files)} in {data_path}\\n\")\n\n return files", "def _glob_files(DATA_PATH):\n FILE_LIST = glob.glob(DATA_PATH + \"/*\")\n return FILE_LIST", "def findFiles(self, identifier='Ne'):\n self.identifier = identifier\n\n self.files = g.glob('*{0:>s}*.fits'.format(self.identifier))\n\n numb = len(self.files)\n\n if numb == 0:\n self.log.info('Did not find any FITS files containg {0:>s}, will exit'.format(self.identifier))\n sys.exit('Did not find files containg {0:>s}...'.format(self.identifier))\n else:\n self.log.info('Found {0:d} frames...'.format(numb))\n\n return self.files", "def win32_data_files():\r\n # Include the main enchant DLL\r\n try:\r\n libEnchant = get_resource_filename(\"libenchant.dll\")\r\n except Error:\r\n libEnchant = get_resource_filename(\"libenchant-1.dll\")\r\n mainDir = os.path.dirname(libEnchant)\r\n dataFiles = [('',[libEnchant])]\r\n # And some specific supporting DLLs\r\n for dll in os.listdir(mainDir):\r\n if not dll.endswith(\".dll\"):\r\n continue\r\n for prefix in (\"iconv\",\"intl\",\"libglib\",\"libgmodule\"):\r\n if dll.startswith(prefix):\r\n break\r\n else:\r\n continue\r\n dataFiles[0][1].append(os.path.join(mainDir,dll))\r\n # And anything found in the supporting data directories\r\n dataDirs = (\"share/enchant/myspell\",\"share/enchant/ispell\",\"lib/enchant\")\r\n for dataDir in dataDirs:\r\n files = []\r\n fullDir = os.path.join(mainDir,os.path.normpath(dataDir))\r\n for fn in os.listdir(fullDir):\r\n fullFn = os.path.join(fullDir,fn)\r\n if os.path.isfile(fullFn):\r\n files.append(fullFn)\r\n dataFiles.append((dataDir,files))\r\n return dataFiles", "def find(self):\n extensions = [\".dll\", \".exe\", \".drv\", \".cpl\", \".ocx\", \".mui\"]\n for path, dirs, files in os.walk(self.disk):\n for filename in files:\n name = filename.lower()\n if name[-4:] in extensions:\n yield name, os.path.join(path, filename)", "def find(self, filter=None):\n if not filter:\n filter = {}\n limit = filter['limit'] if 'limit' in filter else 30\n data = []\n i = 0\n for filename in os.listdir(self.path):\n i += 1\n if i > limit:\n break\n _id, ext = os.path.splitext(filename)\n data.append(self.load(_id))\n\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Truncates/pads a float f to n decimal places without rounding
def truncate(f, n): s = '{}'.format(f) if 'e' in s or 'E' in s: return '{0:.{1}f}'.format(f, n) i, p, d = s.partition('.') return '.'.join([i, (d+'0'*n)[:n]])
[ "def truncate_values(f, n=3):\n if not np.isnan(f):\n if type(f) is not np.ndarray:\n s = '{}'.format(f) # convert float to string\n if 'e' in s or 'E' in s:\n return float('{0:.{1}f}'.format(f, n))\n else:\n s = '{}'.format(f[0]) # convert np.ndarray to string\n if 'e' in s or 'E' in s:\n return float('{0:.{1}f}'.format(f[0], n))\n i, p, d = s.partition('.')\n return float('.'.join([i, (d+'0'*n)[:n]]))\n else:\n return f", "def float_f(f):\n return '{:.0f}'.format(f)", "def round_to_sf(x, n):\n\tif n < 1:\n\t\traise ValueError(\"number of significant digits must be >= 1\")\n\treturn \"%.*f\" % (n-1, x)", "def to_six_dp(n: Union[str, int, float]) -> float:\n # use decimal logic to avoid Python rounding weirdness\n return float(Decimal(str(n)).quantize(Decimal(\"0.000001\"), rounding=ROUND_HALF_UP))", "def humanize_float(n: float) -> float:\n if n == 0 or not math.isfinite(n):\n return n\n digits = int(math.ceil(math.log10(abs(n))))\n # Since we don't do scientific notation, we only round decimal parts, not integer parts.\n # That is, 1.3 seconds instead of 1.3333333 is ok but 3000 seconds instead of 3333 is less so.\n sigfigs = max(digits, 4)\n return round(n, sigfigs - digits)", "def pad(str_float_value):\n return \"%.8f\"%float(str_float_value)", "def fixed_pt_str(val, n):\r\n # (432,3) => 0.432 (3210,3) => 3.210 (23,1) => 2.3\r\n s = str(val)\r\n i = len(s)\r\n pfx = \"0.0000000\"\r\n return pfx[:n+2-i] + s if i <= n else s[:-n] + '.' + s[-n:]", "def floatToFixed(value, precisionBits):\n\treturn round(value * (1<<precisionBits))", "def truncate_float(number, length):\n\n number = number * pow(10, length)\n number = int(number)\n number = float(number)\n number /= pow(10, length)\n return number", "def flops(val=None):\n return 0", "def roundsf(number, sf):\r\n # can't have < 1 s.f.\r\n sf = max(sf,1.)\r\n rounding = int(np.ceil(-np.log10(number) + sf - 1.))\r\n \r\n return np.round(number, rounding)", "def round(value, ndigits='0'):\n\n pass", "def f_price(p):\n return f'{p:.2f}'.rjust(6, ' ')", "def sfloat(x, num_chars=10):\n x = float(x)\n num_chars = int(num_chars)\n start, end = str(x).split('.')\n start_chars = len(str(float(start)))\n if start_chars > num_chars:\n raise Exception('Try num_chars = {}'.format(start_chars))\n return '{}' \\\n .format('{:' + str(num_chars) + '.' +\n str(num_chars - start_chars + 1) + 'f}') \\\n .format(x)", "def floatToFixedToFloat(value, precisionBits):\n\tscale = 1<<precisionBits\n\treturn round(value * scale) / scale", "def __format_float(self, num: float) -> str:\n # format the float to remove trailing zeros and decimal point\n precision: int = Plugin.config[\"precision\"]\n return f\"{num:.{precision}f}\".rstrip(\"0\").rstrip(\".\")", "def _round_up(value, n):\n return n * ((value + (n - 1)) // n)", "def round_up(number, sig_fig=3):\n return float(('%.'+('%i' % sig_fig)+'g') % number)", "def test_padded_rounded_float(self):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bans the user, deleting x days' worth of messages, then reinvites him
async def bansoft(self, ctx, user: discord.Member, *, reason: str, days: int): server = ctx.message.server channel = ctx.message.channel can_ban = channel.permissions_for(server.me).kick_members author = ctx.message.author if author == user: await self.bot.say("I cannot let you do that. Self-harm is " "bad \N{PENSIVE FACE}") return elif not self.is_allowed_by_hierarchy(server, author, user): await self.bot.say("I cannot let you do that. You are " "not higher than the user in the role " "hierarchy.") return if reason is None: await self.bot.say("La raison est obligatoire. Y-en a t'il " "vraiment une ? \N{THINKING FACE}") return try: invite = await self.bot.create_invite(server, max_age=3600*24*(days+1)) invite = "\nInvite: " + invite except: invite = "" if can_ban: try: try: # We don't want blocked DMs preventing us from banning msg = await self.bot.send_message(user, "You have been banned and " "then unbanned as a quick way to delete your messages.\n" "You can now join the server again.{}".format(invite)) except: pass self.temp_cache.add(user, server, "SOFTBAN") await self.bot.ban(user, days) logger.info("{}({}) softbanned {}({}), deleting {} day(s) worth " "of messages".format(author.name, author.id, user.name, user.id, days)) await self.cases.new_case(server, action="SOFTBAN", mod=author, user=user, reason=reason) self.temp_cache.add(user, server, "UNBAN") await self.bot.unban(server, user) await self.bot.say("Done. Enough chaos.", delete_after=15) except discord.errors.Forbidden: await self.bot.say("My role is not high enough to softban that user.", delete_after=self.settings[server.id]["delete_delay"]) await self.bot.delete_message(msg) except Exception as e: print(e) else: await self.bot.say("I'm not allowed to do that.", delete_after=self.settings[server.id]["delete_delay"])
[ "async def ban(ctx, members : commands.Greedy[discord.Member],\n delete_days : typing.Optional[int] = 0, *,\n reason : str):\n for member in members:\n await member.ban(delete_message_days=delete_days, reason=reason)\n await ctx.send(f'Banned {member.mention}')", "async def _prune_user(self, ctx, max_messages:int, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_messages == False:\n await self.bot.say(\"Sorry, this doesn't work on this server (No manage_messages Permission)!\")\n return\n if max_messages > 1500:\n await self.bot.say(\"2 many messages\")\n for user in users:\n def m(k):\n return k.author == user\n deleted = await self.bot.purge_from(ctx.message.channel, limit=max_messages, check=m)\n if len(deleted) == 0:\n x = await self.bot.say(\"No messages found by {0} \\nwithin the given max message search amount!\".format(user.name))\n await asyncio.sleep(10)\n await self.bot.delete_message(x)\n return\n x = await self.bot.say(\"ok, removed `{0}` messages out of `{1}` searched for `{2}`\".format(str(len(deleted)), str(max_messages), user.name))\n await asyncio.sleep(10)\n await self.bot.delete_message(x)\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass", "async def purge(\r\n self,\r\n ctx,\r\n amount: int,\r\n user: typing.Optional[discord.Member] = None,\r\n *,\r\n matches=None,\r\n ):\r\n await ctx.message.delete()\r\n\r\n def check_msg(msg):\r\n if msg.id == ctx.message.id:\r\n return True\r\n if user is not None:\r\n if msg.author.id != user.id:\r\n return False\r\n if matches is not None:\r\n if matches not in msg.content:\r\n return False\r\n return not msg.pinned\r\n\r\n if amount > 1000:\r\n return await ctx.send(\r\n \"You can not purge more than 1000 messages each time!\"\r\n )\r\n\r\n amount = await ctx.channel.purge(limit=amount, check=check_msg)\r\n if len(amount) == 0:\r\n return await ctx.send(\"There are no messages that I can delete!\")\r\n\r\n await ctx.send(\r\n f\"{len(amount)} messages have been purged by {ctx.author.mention}\",\r\n delete_after=3,\r\n )", "async def unbanall(self, ctx):\n banned_users = await ctx.guild.bans()\n for user in banned_users:\n await ctx.guild.unban(user, reason=\"Mass Unban\")\n await ctx.send(\"All members successfully unbanned.\")", "async def purge(ctx, limit: int = 100, user: discord.Member = None, *, matches: str = None):\r\n #logger.info('purge', extra={'ctx': ctx})\r\n await ctx.message.delete()\r\n def check_msg(msg):\r\n if msg.id == ctx.message.id:\r\n return True\r\n if user is not None:\r\n if msg.author.id != user.id:\r\n return False\r\n if matches is not None:\r\n if matches not in msg.content:\r\n return False\r\n return True\r\n\r\n deleted = await ctx.channel.purge(limit=limit, check=check_msg)\r\n msg = await ctx.send('Delete : ' + len(deleted))\r\n await asyncio.sleep(2)\r\n await msg.delete()", "async def _unfriendtime(self, ctx, days: int):\n await self.config.guild(ctx.guild).unfriendafter.set(days)\n await ctx.send(f\"Inactivity days till auto unfriend is {days} days.\")", "async def _prune(self, ctx, num_to_delete : int, *message):\n # tmp channel/server pointer\n chan = ctx.message.channel\n serv = ctx.message.guild\n\n #if num_to_delete > 100: # api only allows up to 100\n # await ctx.send('Sorry, only up to 100') # TODO - copy thing done in\n # return # self._paste\n if num_to_delete < 1: # delete nothing?\n await ctx.send('umm... 
no') # answer: no\n return\n\n # if the first word in the message matches a user,\n # remove that word from the message, store the user\n try:\n user = dh.get_user(serv or self.bot, message[0])\n if user:\n message = message[1:]\n except:\n logger.debug('did not match a user')\n user = None\n\n check = lambda m: True\n if user: # if a user was matched, delete messages for that user only\n logger.debug(f'pruning for user {user.name}')\n check = lambda m: str(m.author.id) == str(user.id)\n\n message = ' '.join(message) #make the message a string\n\n logs = []\n async for m in chan.history(limit=num_to_delete, reverse=True):\n if check(m):\n logs.append(m)\n\n deleted = len(logs)\n old = False\n while len(logs) > 0: # while there are messages to delete\n if len(logs) > 1: # if more than one left to delete and not old,\n if not old: # attempt batch delete [2-100] messages\n try:\n await chan.delete_messages(logs[:100])\n except: # if problem when batch deleting\n old = True # then the messages must be old\n if old: # if old, traverse and delete individually\n for entry in logs[:100]:\n try:\n await entry.delete()\n except:\n logger.exception('<{0.author.name}> {0.content}'.format(entry))\n logs = logs[100:]\n else: # if only one message, delete individually\n await logs[0].delete()\n logs.remove(logs[0])\n\n #report that prume was complete, how many were prunned, and the message\n await ctx.send(ok('Deleted {} message{} {}'.format(\n deleted,\n '' if deleted == 1 else 's',\n f'({message})' if message else ''\n )\n )\n )", "def delete_new_senders(self, user):\n try:\n self.database.execute(\"delete from chat where message = \"\n \"'###new_message###' \"\n \"and frm = '%s'\" % user)\n self.database.commit()\n except sqlite3.IntegrityError:\n pass", "def post(chat, message, args):\n if message.sender.id != 26170256: #Only admin command\n message.reply(\"This command it's only for the admin of the bot\")\n return\n\n c.execute('SELECT * FROM users')\n users_list = c.fetchall()\n\n message = \" \".join(message.text.split(\" \", 1)[1:])\n\n n = 0\n\n for res in users_list:\n n += 1\n\n if n < 50:\n continue\n\n try:\n bot.chat(res[0]).send(message)\n chat.send(\"Post sent to \"+str(res[0]))\n except botogram.api.ChatUnavailableError:\n c.execute('DELETE FROM users WHERE user_id={}'.format(res[0]))\n chat.send(\"The user \"+str(res[0])+\" has blocked your bot, so I removed him from the database\")\n conn.commit()\n except Exception as e:\n chat.send(\"*Unknow error :(*\\n\"+str(e))\n\n chat.send(\"<b>Done!</b>\\nThe message has been delivered to all users\") #Yeah\n conn.commit()", "async def on_member_unban(self, guild: Guild, user: MemberOrUser):", "async def botpurge_messages(self, ctx, limit: int=100, ago: int=None):\n\n if limit > 101:\n return await ctx.send('Limit can not be more than **101**.')\n\n def check(message):\n return self.bot.user.id == message.author.id\n\n if ago:\n around = datetime.datetime.utcnow() - datetime.timedelta(hours=ago)\n else:\n around = datetime.datetime.utcnow()\n\n htime = around.strftime('%d/%m/%Y - %H:%M')\n\n purged = await ctx.channel.purge(limit=limit, check=check, around=around)\n await ctx.send(f'Purged **{len(purged)}** messages from myself around `{htime} UTC`.')", "async def prune(self, ctx: GuildContext, user: discord.Member, days: int = 1, *channels: discord.TextChannel):\n channels = channels or ctx.guild.text_channels # type: ignore\n deleted = []\n for channel in channels:\n await ctx.send(f'Deleting messages from {channel.mention}')\n deleted += 
await channel.purge(\n limit=None,\n check=lambda m: m.author == user,\n after=datetime.now() - timedelta(days=days))\n await ctx.send(f\"Deleted {len(deleted) - 1} messages.\", delete_after=1)", "def decline_invitation(user: models.User, game: models.Game):\n if game.invited != user:\n raise RequestError(2111)\n _end_socket_session(game.host, game)\n game.delete_instance()", "async def game_unban(self, ctx, member: discord.Member):\n _ = self.bot._\n language = await self.bot.db.get_pref(ctx.channel, \"language\")\n\n await self.bot.db.set_stat(ctx.channel, member, \"banned\", False)\n await self.bot.send_message(ctx=ctx, message=_(\":ok: Done, user unbanned. :eyes:\", language))", "def perma_ban_checker(context: CallbackContext, session: scoped_session) -> None:\n vote_limit = config[\"telegram\"][\"max_user_votes_per_day\"]\n stats = (\n session.query(UserStatistic)\n .filter(UserStatistic.votes >= vote_limit)\n .filter(UserStatistic.date == date.today())\n .all()\n )\n\n for stat in stats:\n # Check how often the user reached the limit in the last week\n days_above_limit = (\n session.query(UserStatistic)\n .filter(UserStatistic.votes >= vote_limit)\n .filter(UserStatistic.date >= date.today() - timedelta(days=6))\n .filter(UserStatistic.date <= date.today() - timedelta(days=1))\n .filter(UserStatistic.user == stat.user)\n .all()\n )\n\n # If the user reached the limit on two other days in the last week (three days in total)\n if len(days_above_limit) >= 2:\n stat.user.banned = True", "async def take(self, ctx, amount: int):\n for member in ctx.message.mentions:\n member = ctx.get_user_data(member)\n member.wallet -= amount\n await member.save()\n await ctx.send(\"Taken {0} :dollar: from {1}\".format(amount, [i.display_name for i in ctx.message.mentions]))", "async def votekick(self, ctx):\n global votekick_on\n if votekick_on == 1:\n await ctx.channel.send(\"A votekick is already open\")\n return\n votekick_on = 1\n voted = [] # list of user ID's that voted\n message = ctx.message # message string\n user = message.content.split()[1] # user mention\n votes_needed = 9 # int(len(message.guild.members) / 2 + 1)\n current_votes = 0\n await ctx.message.channel.send(\n \"Starting a votekick for user {}.\"\n \" Type \\\"vote yes\\\" or \\\"vote no\\\" \\n\"\n \"Votes needed:({}/{}) \"\n .format(user, current_votes, votes_needed))\n bot_message = None\n async for message in ctx.channel.history(limit=50):\n if message.author.name == \"Iku-nee\":\n bot_message = message\n break\n timer = 60\n current_timer = 0\n while current_timer < timer:\n time.sleep(1)\n async for message in ctx.channel.history(limit=50):\n if message.content == \"vote yes\":\n if MiscellaneousCog.verify_vote(voted, message.author.id):\n current_votes += 1\n voted.append(message.author.id)\n\n await bot_message.edit(\n content=\"Starting a votekick for user {}.\"\n \" Type \\\"vote yes\\\" or \\\"vote no\\\" \\n\"\n \"Votes needed:({}/{})\"\n .format(user, current_votes, votes_needed))\n await message.delete()\n elif message.content == \"vote no\":\n voted.append(message.author.id)\n await message.delete()\n\n current_timer += 1\n if current_votes >= votes_needed:\n await ctx.channel.send(\"The senate decision is exile, begone {}!\".format(user))\n else:\n await ctx.channel.send(\"The senate spares {}\".format(user))\n votekick_on = 0", "async def botunban(self, ctx, user: discord.User):\n\t\tif checks.is_owner_check(user) or user == self.bot.user:\n\t\t\tawait self.bot.say(\"Ha ha. 
Very funny.\")\n\t\t\treturn\n\t\tbotdata.serverinfo(ctx.message.server).botunban(user)\n\t\tawait self.bot.say(\"{} is free of their restraints and may once again use commands\".format(user.mention))", "async def unban_command(self, ctx, banned=None, *, reason=None):\n guild = ctx.message.guild\n if banned is None or not banned.isdigit():\n await error_response(ctx.channel, 'Please provide a user ID!')\n return\n\n for (_, user) in await guild.bans():\n if int(user.id) == int(banned):\n await guild.unban(user, reason=reason)\n await quick_response(ctx.channel,\n f'Successfully unbanned user: `{user.name}#{user.discriminator}`!')\n return\n\n await error_response(ctx.channel, 'Failed to find a banned user with that ID!')\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables auto deletion of repeated messages
async def delrepeats(self, ctx): server = ctx.message.server if not self.settings[server.id]["delete_repeats"]: self.settings[server.id]["delete_repeats"] = True await self.bot.say("Messages repeated up to 3 times will " "be deleted.") else: self.settings[server.id]["delete_repeats"] = False await self.bot.say("Repeated messages will be ignored.", delete_after=self.settings[server.id]["delete_delay"]) dataIO.save_json(self._ownersettings_path, self.settings)
[ "def mail_clear_deleted(self):\n self._dels.clear()", "def delete(self):\n if self.is_deleted:\n return\n if self.is_question:\n self.topic.is_deleted = True\n for tag in self.topic.tags:\n atomic_add(tag, 'tagged', -1)\n else:\n atomic_add(self.topic, 'reply_count', -1)\n self.is_deleted = True", "def press_delete():\n offsets_deleted_sentences.append(current_offset)\n logging.info(f\"{current_offset} deleted\")\n prepare_next_turn()", "def test_delete_inbox_repliers(self):\n pass", "async def _before_delete(self) -> None:", "async def _prune(self, ctx, num_to_delete : int, *message):\n # tmp channel/server pointer\n chan = ctx.message.channel\n serv = ctx.message.guild\n\n #if num_to_delete > 100: # api only allows up to 100\n # await ctx.send('Sorry, only up to 100') # TODO - copy thing done in\n # return # self._paste\n if num_to_delete < 1: # delete nothing?\n await ctx.send('umm... no') # answer: no\n return\n\n # if the first word in the message matches a user,\n # remove that word from the message, store the user\n try:\n user = dh.get_user(serv or self.bot, message[0])\n if user:\n message = message[1:]\n except:\n logger.debug('did not match a user')\n user = None\n\n check = lambda m: True\n if user: # if a user was matched, delete messages for that user only\n logger.debug(f'pruning for user {user.name}')\n check = lambda m: str(m.author.id) == str(user.id)\n\n message = ' '.join(message) #make the message a string\n\n logs = []\n async for m in chan.history(limit=num_to_delete, reverse=True):\n if check(m):\n logs.append(m)\n\n deleted = len(logs)\n old = False\n while len(logs) > 0: # while there are messages to delete\n if len(logs) > 1: # if more than one left to delete and not old,\n if not old: # attempt batch delete [2-100] messages\n try:\n await chan.delete_messages(logs[:100])\n except: # if problem when batch deleting\n old = True # then the messages must be old\n if old: # if old, traverse and delete individually\n for entry in logs[:100]:\n try:\n await entry.delete()\n except:\n logger.exception('<{0.author.name}> {0.content}'.format(entry))\n logs = logs[100:]\n else: # if only one message, delete individually\n await logs[0].delete()\n logs.remove(logs[0])\n\n #report that prume was complete, how many were prunned, and the message\n await ctx.send(ok('Deleted {} message{} {}'.format(\n deleted,\n '' if deleted == 1 else 's',\n f'({message})' if message else ''\n )\n )\n )", "def after_delete(self):\n pass", "def delete_messages(self, id_list):\r\n\r\n for msg_id in id_list:\r\n self.delete_message(msg_id)", "def test_api_v1_messages_delete_multiple_delete(self):\n pass", "async def bulkmessagedelete(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"bulk_message_delete\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET bulk_message_delete = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for bulk messages being deleted.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET bulk_message_delete = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for bulk messages being deleted.\")\n await ctx.send(embed=embed)", "async def purge(self, ctx, msgs: int, *, txt=None):\n await 
self.bot.delete_message(ctx.message)\n if msgs < 10000:\n async for message in self.bot.logs_from(ctx.message.channel, limit=msgs):\n try:\n if txt:\n if txt.lower() in message.content.lower():\n await self.bot.delete_message(message)\n else:\n await self.bot.delete_message(message)\n except:\n pass\n else:\n await self.bot.send_message(ctx.message.channel, bot_prefix + 'Too many messages to delete. Enter a number < 10000')", "async def messagedelete(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"message_delete\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET message_delete = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for deleting message.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET message_delete = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for deleting message.\")\n await ctx.send(embed=embed)", "def delete_placed_calls_list(self):\r\n\t\tfor message in self._gv.placed().messages:\r\n\t\t\tmessage.delete()", "def delete_received_calls_list(self):\r\n\t\tfor message in self._gv.received().messages:\r\n\t\t\tmessage.delete()", "def delete_message(self, msg_id):\r\n\r\n self.handle.dele(msg_id)", "def test_delete_inbox_replier(self):\n pass", "def __deleteMessage(self, i):\n\t\tassert self.__mailboxCondition.locked()\n\t\treceivers = self.__currentProcess._getReceivers()\n\t\tdel self.__mailbox[i]\n\t\tfor receiver in receivers:\n\t\t\tif receiver is not self:\n\t\t\t\t# Acquire lock to prevent a remote process from calling addHandler and\n\t\t\t\t# resetting __lastMessage:\n\t\t\t\treceiver.__lock.acquire()\n\t\t\t\tif receiver.__lastMessage > i:\n\t\t\t\t\treceiver.__lastMessage -= 1\n\t\t\t\treceiver.__lock.release()\n\t\t\t# end if\n\t\t# end for", "async def on_message_delete(self, message) -> None:\n if self.karma_service.find_message(str(message.id)) is not None:\n await self.remove_karma(message, message.guild, \"message delete\")", "async def clean(self, ctx, max_messages:int):\n if max_messages > 1500:\n await self.bot.say(\"2 many messages\")\n return\n count = 0\n async for message in self.bot.logs_from(ctx.message.channel, limit=max_messages+1):\n if message.author == self.bot.user:\n asyncio.ensure_future(self.bot.delete_message(message))\n await asyncio.sleep(0.21)\n count += 1\n x = await self.bot.say(\"Removed `{0}` messages out of `{1}` searched messages\".format(count, max_messages))\n await asyncio.sleep(10)\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n await self.bot.delete_message(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables auto ban for messages mentioning X different people
async def banmentionspam(self, ctx, max_mentions: int=False): server = ctx.message.server if max_mentions: if max_mentions < 5: max_mentions = 5 self.settings[server.id]["ban_mention_spam"] = max_mentions await self.bot.say("Autoban for mention spam enabled. " "Anyone mentioning {} or more different people " "in a single message will be autobanned." "".format(max_mentions)) else: if self.settings[server.id]["ban_mention_spam"] is False: await send_cmd_help(ctx) return self.settings[server.id]["ban_mention_spam"] = False await self.bot.say("Autoban for mention spam disabled.", delete_after=self.settings[server.id]["delete_delay"]) dataIO.save_json(self._ownersettings_path, self.settings)
[ "async def auto_bans(self, ctx):\n config = hf.database_toggle(ctx, self.bot.db['auto_bans'])\n if config['enable']:\n await ctx.send('Enabled the auto bans module. I will now automatically ban all users who join with '\n 'a discord invite link username or who join and immediately send an amazingsexdating link')\n else:\n await ctx.send('Disabled the auto bans module. I will no longer auto ban users who join with a '\n 'discord invite link username or who spam a link to amazingsexdating.')\n await hf.dump_json()", "async def bansoft(self, ctx, user: discord.Member, *, reason: str, days: int):\r\n server = ctx.message.server\r\n channel = ctx.message.channel\r\n can_ban = channel.permissions_for(server.me).kick_members\r\n author = ctx.message.author\r\n\r\n if author == user:\r\n await self.bot.say(\"I cannot let you do that. Self-harm is \"\r\n \"bad \\N{PENSIVE FACE}\")\r\n return\r\n elif not self.is_allowed_by_hierarchy(server, author, user):\r\n await self.bot.say(\"I cannot let you do that. You are \"\r\n \"not higher than the user in the role \"\r\n \"hierarchy.\")\r\n return\r\n if reason is None:\r\n await self.bot.say(\"La raison est obligatoire. Y-en a t'il \"\r\n \"vraiment une ? \\N{THINKING FACE}\")\r\n return\r\n try:\r\n invite = await self.bot.create_invite(server, max_age=3600*24*(days+1))\r\n invite = \"\\nInvite: \" + invite\r\n except:\r\n invite = \"\"\r\n if can_ban:\r\n try:\r\n try: # We don't want blocked DMs preventing us from banning\r\n msg = await self.bot.send_message(user, \"You have been banned and \"\r\n \"then unbanned as a quick way to delete your messages.\\n\"\r\n \"You can now join the server again.{}\".format(invite))\r\n except:\r\n pass\r\n self.temp_cache.add(user, server, \"SOFTBAN\")\r\n await self.bot.ban(user, days)\r\n logger.info(\"{}({}) softbanned {}({}), deleting {} day(s) worth \"\r\n \"of messages\".format(author.name, author.id, user.name,\r\n user.id, days))\r\n await self.cases.new_case(server,\r\n action=\"SOFTBAN\",\r\n mod=author,\r\n user=user,\r\n reason=reason)\r\n self.temp_cache.add(user, server, \"UNBAN\")\r\n await self.bot.unban(server, user)\r\n await self.bot.say(\"Done. 
Enough chaos.\", delete_after=15)\r\n except discord.errors.Forbidden:\r\n await self.bot.say(\"My role is not high enough to softban that user.\", delete_after=self.settings[server.id][\"delete_delay\"])\r\n await self.bot.delete_message(msg)\r\n except Exception as e:\r\n print(e)\r\n else:\r\n await self.bot.say(\"I'm not allowed to do that.\", delete_after=self.settings[server.id][\"delete_delay\"])", "async def blacklist(self, ctx, option):\n\t\tblacklist_amount = 0\n\t\tmentions = ctx.message.mentions\n\n\t\tif not mentions:\n\t\t\treturn await ctx.send(\"You didn't mention anyone\")\n\n\t\tif option not in ['+', '-', 'add', 'remove']:\n\t\t\treturn await ctx.send('Invalid option \"%s\" specified, use +, -, add, or remove' % option, expire_in=20)\n\n\t\tfor user in mentions:\n\t\t\tif user.id == load_config.owner:\n\t\t\t\tprint(\"[Commands:Blacklist] The owner cannot be blacklisted.\")\n\t\t\t\tawait ctx.send(\"The owner cannot be blacklisted.\")\n\t\t\t\tmentions.remove(user)\n\n\t\tif option in ['+', 'add']:\n\t\t\twith open(\"settings/blacklist.txt\", \"r\") as fp:\n\t\t\t\tfor user in mentions:\n\t\t\t\t\tfor line in fp.readlines():\n\t\t\t\t\t\tif user.id + \"\\n\" in line:\n\t\t\t\t\t\t\tmentions.remove(user)\n\n\t\t\twith open(\"settings/blacklist.txt\", \"a+\") as fp:\n\t\t\t\tlines = fp.readlines()\n\t\t\t\tfor user in mentions:\n\t\t\t\t\tif user.id not in lines:\n\t\t\t\t\t\tfp.write(\"{}\\n\".format(user.id))\n\t\t\t\t\t\tblacklist_amount += 1\n\t\t\treturn await ctx.send('{} user(s) have been added to the blacklist'.format(blacklist_amount))\n\n\t\telif option in ['-', 'remove']:\n\t\t\twith open(\"settings/blacklist.txt\", \"r\") as fp:\n\t\t\t\tlines = fp.readlines()\n\t\t\twith open(\"settings/blacklist.txt\", \"w\") as fp:\n\t\t\t\tfor user in mentions:\n\t\t\t\t\tfor line in lines:\n\t\t\t\t\t\tif user.id + \"\\n\" != line:\n\t\t\t\t\t\t\tfp.write(line)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfp.write(\"\")\n\t\t\t\t\t\t\tblacklist_amount += 1\n\t\t\t\treturn await ctx.send('{} user(s) have been removed from the blacklist'.format(blacklist_amount))", "async def ban ( self , ctx , member : discord.Member = None , * reason ):\n if member is not None :\n if reason:\n reason = ' ' .join (reason)\n else :\n reason = None\n await member.ban ( reason = reason)\n else :\n await ctx.send ( ' **: no_entry: ** No user specified! 
' )", "async def ban(ctx, member: discord.Member):\n uid = member.id\n if uid not in mods and uid not in banlist and uid != overlord:\n r.lpush(\"banlist\", uid)\n banlist.append(uid)\n await ctx.send(\"Begone!\")\n return True\n else:\n return False", "async def massban(self, ctx, reason: ActionReason, *members: MemberID):\n\n for member_id in members:\n await ctx.guild.ban(discord.Object(id=member_id), reason=reason)\n\n await ctx.send('\\N{OK HAND SIGN}')", "def ban(self, user):\n self.chat(self.sock, \".ban {}\\r\\n\".format(user).encode(\"utf-8\"))", "async def on_member_ban(self, guild: Guild, user: MemberOrUser):", "async def ban(self, ctx, user: discord.Member, *, reason=None):\n embed = discord.Embed(title=\"You have been banned from {}\".format(ctx.guild.name), description=reason)\n embed.set_footer(text=\"Ban by \" + ctx.author.display_name)\n embed.set_thumbnail(url=ctx.guild.icon_url)\n await user.ban(reason=reason)\n await ctx.send(f\"**User {user.mention} has been banned by {ctx.author.mention}**\")\n await user.send(embed=embed)", "async def ban(ctx, members : commands.Greedy[discord.Member],\n delete_days : typing.Optional[int] = 0, *,\n reason : str):\n for member in members:\n await member.ban(delete_message_days=delete_days, reason=reason)\n await ctx.send(f'Banned {member.mention}')", "async def ban(ctx: commands.Context, choice):\n await pick_or_ban(ctx, \"bans\", choice)", "async def game_ban(self, ctx, member: discord.Member):\n _ = self.bot._\n language = await self.bot.db.get_pref(ctx.channel, \"language\")\n\n await self.bot.db.set_stat(ctx.channel, member, \"banned\", True)\n await self.bot.send_message(ctx=ctx, message=_(\":ok: Done, user banned. :gun:\", language))", "async def botban(self, ctx, user: discord.User):\n\t\tif checks.is_owner_check(user):\n\t\t\tawait self.bot.say(\"Ya can't ban mah owner, man. 😠\")\n\t\t\treturn\n\t\tif checks.is_admin_check(ctx.message.channel, user):\n\t\t\tawait self.bot.say(\"Ya can't ban other admins\")\n\t\t\treturn\n\t\tif user == self.bot.user:\n\t\t\tawait self.bot.say(\"Lol you can't ban me, silly\")\n\t\t\treturn\n\t\tbotdata.serverinfo(ctx.message.server).botban(user)\n\t\tawait self.bot.say(\"{} has henceforth been banned from using commands 😤\".format(user.mention))", "async def memberban(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"member_ban\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET member_ban = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for members being banned.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET member_ban = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for members being banned.\")\n await ctx.send(embed=embed)", "async def spam(self, ctx, *, msg=\"This is spam\"):\n msg = msg.split()\n try:\n if int(msg[0]) > 50:\n await self.bot.say(\"That is too much spam. 
I will only spam 50x\")\n msg[0] = \"50\"\n try:\n for _ in range(int(msg[0])):\n await self.bot.say(\" \".join(msg[1:]))\n except:\n for _ in range(int(msg[0])):\n await self.bot.say(\"This is spam.\")\n except:\n for _ in range(10):\n await self.bot.say(\" \".join(msg))", "def ban(self, obj: Device) -> str:\n obj.is_banned = True\n return self.update(obj)", "def add_ban(self, mask):\n mask = mask.lower()\n if mask in self.banned:\n return\n\n self.banned.add(mask)\n self.bot.db.set_plugin_value(PLUGIN, 'banned', list(self.banned))\n self.banned_re = self.re_join(sopel.tools.get_hostmask_regex(b).pattern for b in self.banned)", "async def unbanall(self, ctx):\n banned_users = await ctx.guild.bans()\n for user in banned_users:\n await ctx.guild.unban(user, reason=\"Mass Unban\")\n await ctx.send(\"All members successfully unbanned.\")", "async def _appeal(self, ctx):\n await ctx.send(\"Banned? Appeal here: https://discord.gg/J9YVWgF\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes channel from ignore list. Defaults to the current one
async def unignore_channel(self, ctx, channel: discord.Channel=None): current_ch = ctx.message.channel server = ctx.message.server if not channel: if current_ch.id in self.ignore_list["CHANNELS"]: self.ignore_list["CHANNELS"].remove(current_ch.id) dataIO.save_json(self._ignore_list_path, self.ignore_list) await self.bot.say("This channel has been removed from the ignore list.", delete_after=self.settings[server.id]["delete_delay"]) else: await self.bot.say("This channel is not in the ignore list.", delete_after=self.settings[server.id]["delete_delay"]) else: if channel.id in self.ignore_list["CHANNELS"]: self.ignore_list["CHANNELS"].remove(channel.id) dataIO.save_json(self._ignore_list_path, self.ignore_list) await self.bot.say("Channel removed from ignore list.", delete_after=self.settings[server.id]["delete_delay"]) else: await self.bot.say("That channel is not in the ignore list.", delete_after=self.settings[server.id]["delete_delay"])
[ "def removes_channel(channel):", "async def _watignore_channel(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings['ignore_channels']:\n self.settings['ignore_channels'].remove(channel.id)\n await self.bot.say(\"wut? Ok, I will no longer \"\n \"ignore this channel.\")\n else:\n self.settings['ignore_channels'].append(channel.id)\n await self.bot.say(\"wat? Alright, I will ignore \"\n \"this channel.\")\n dataIO.save_json(self.settings_path, self.settings)", "def filter_channel(self, channel_list=None):\n if channel_list == None:\n return None\n all_ch = self.keys()\n for ch in all_ch:\n if ch not in channel_list:\n self.pop(ch)", "async def unignore(self, obj: Union[discord.TextChannel, discord.Member]):\n if not await self.is_ignored(obj):\n return False\n ignore_type = \"members\" if isinstance(obj, discord.Member) else \"channels\"\n async with self.guild_config.ignored.get_attr(ignore_type)() as i:\n i.remove(obj.id)\n return True", "async def on_channel_delete(self, channel):", "async def _blacklist_channel(self, ctx, chan:discord.Channel=None):\n if chan == None:\n chan = ctx.message.channel\n blacklist_path = self.discord_path('utils/cblacklist.txt')\n if chan.id in open(blacklist_path).read():\n with open(blacklist_path) as f:\n s = f.read().replace(chan.id + \"\\n\", '')\n with open(blacklist_path, \"w\") as f:\n f.write(s)\n await self.bot.say(\"ok, unblacklisted channel {0.mention} `<{0.id}>`\".format(chan))\n else:\n with open(blacklist_path, \"a\") as f:\n f.write('{0}\\n'.format(chan.id))\n await self.bot.say(\"ok, blacklisted channel {0.mention} `<{0.id}>`\".format(chan))", "async def remove_aschannel(\n self,\n ctx: commands.Context,\n channel: Union[discord.TextChannel, int]\n ) -> None:\n channel_id = channel.id if isinstance(channel, discord.TextChannel)\\\n else channel\n await settings.remove_aschannel(self.bot, channel_id, ctx.guild.id)\n await ctx.send(\n f\"Removed AutoStar Channel {channel}\"\n )", "async def removeblacklist(self, ctx):\n channel_mentions = ctx.message.channel_mentions\n\n if channel_mentions:\n out = ''\n for channel in channel_mentions:\n try:\n cp.remove_channel(ctx.guild.id, channel.id, cp.BLACKLIST_KEY)\n out += f'{channel.name}, '\n except ValueError:\n pass\n if len(out) < 2:\n out = 'No channels were '\n await ctx.send(f\"{out[:-2]} removed from blacklist!\")", "async def reset(self, ctx):\n await self.config.guild(ctx.guild).channel_whitelist.set([\"general\"])\n await self.config.guild(ctx.guild).channel_blacklist.set([])\n await ctx.send(\"Done\")", "async def remove(self, ctx, target_channel: discord.TextChannel):\n if not isinstance(target_channel, discord.TextChannel):\n await ctx.send(\"that is not a valid channel fam\", delete_after=4)\n return\n try:\n message_id = await self.bot.pg_controller.get_message_info(\n ctx.channel.id, target_channel.id)\n except Exception as e:\n await ctx.send(\"something broke\", delete_after=3)\n return\n if not message_id:\n return\n og_message = await ctx.channel.fetch_message(int(message_id))\n\n try:\n # removes the channel watching from the db\n await self.bot.pg_controller.rm_channel_chanreact(target_channel, ctx.channel.id)\n except:\n pass\n try:\n # resets the perms\n await target_channel.edit(sync_permissions=True)\n except:\n pass\n\n for i in range(len(self.bot.chanreact)):\n # removes the channel from the bot cacheing\n if self.bot.chanreact[i]['message_id'] == message_id and \\\n self.bot.chanreact[i]['host_channel'] == ctx.channel.id and \\\n 
self.bot.chanreact[i]['target_channel'] == target_channel.id:\n del self.bot.chanreact[i]\n break\n\n await og_message.delete()\n await self.bot.pg_controller.rem_channel_message(target_channel.id, ctx.channel.id) # removes the channel for user watching\n await ctx.message.delete()", "async def deleter(self, ctx):\r\n async with self.lock:\r\n channels = await self.conf.all_channels()\r\n sending = \"\"\r\n for c, data in channels.items():\r\n c = self.bot.get_channel(int(c))\r\n if c is None:\r\n continue\r\n if c.guild.id == ctx.guild.id and int(data[\"wait\"]) != 0:\r\n sending += f\"{c.mention}: {data['wait']} seconds\\n\"\r\n if sending:\r\n await ctx.send(sending)\r\n else:\r\n await ctx.send(\r\n f\"No channels are currently being tracked. Add one by using `{ctx.prefix}deleter channel`.\"\r\n )", "async def ignore(self, obj: Union[discord.TextChannel, discord.Member]):\n if await self.is_ignored(obj):\n return False\n ignore_type = \"members\" if isinstance(obj, discord.Member) else \"channels\"\n async with self.guild_config.ignored.get_attr(ignore_type)() as i:\n i.append(obj.id)\n return True", "def ignore_channel(self, channel_name):\n if channel_name in self.config.ignore_channels:\n return True\n for pat in self.config.ignore_channel_patterns:\n if re.search(pat, channel_name):\n return True\n return False", "def pop_empty_channels(self):\n\n murderlist = []\n\n for chname in self.channels:\n channel = self.channels[chname]\n\n if len(channel.clients) == 0:\n murderlist.append(channel)\n\n for victim in murderlist:\n del self.channels[victim.name]", "def clear_channels(self):\n self.write('CALCulate:PARameter:DELete:ALL')\n for submodule in self.submodules.values():\n if isinstance(submodule, ChannelList):\n submodule._channels = []\n submodule._channel_mapping = {}\n submodule._locked = False", "def clean_channel_image(self, channel_name):\r\n with self.channel_lock:\r\n for i in range(len(self.channel_list)):\r\n if self.channel_list[i].channel_name == channel_name:\r\n self.channel_list[i].image = None\r\n break", "def hugroom(self, irc, msg, args, channel):\n if not ircdb.checkCapability(msg.prefix, 'admin'):\n irc.reply(\"Permission Denied!\")\n return\n\n chanObj = irc.state.channels[channel]\n users = chanObj.users\n for user in users:\n if user in self.excludes:\n continue\n irc.reply(\"huggles {user}\".format(user=user))", "def remove_event_detect(cls, channel):\n cls.CALLBACKS.pop(channel, None)\n cls.MODES.pop(channel, None)", "def reset_channel(self, channel):\n self.channels_fired[channel] = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes current server from ignore list
async def unignore_server(self, ctx): server = ctx.message.server if server.id in self.ignore_list["SERVERS"]: self.ignore_list["SERVERS"].remove(server.id) dataIO.save_json(self._ignore_list_path, self.ignore_list) await self.bot.say("This server has been removed from the ignore list.", delete_after=self.settings[server.id]["delete_delay"]) else: await self.bot.say("This server is not in the ignore list.", delete_after=self.settings[server.id]["delete_delay"])
[ "async def _watignore_server(self, ctx):\n\n server = ctx.message.server\n if server.id in self.settings['ignore_servers']:\n self.settings['ignore_servers'].remove(server.id)\n await self.bot.say(\"wot? Ok boss, I will no longer \"\n \"ignore this server.\")\n else:\n self.settings['ignore_servers'].append(server.id)\n await self.bot.say(\"what? Fine, I will ignore \"\n \"this server.\")\n dataIO.save_json(self.settings_path, self.settings)", "async def RemoveBlackList(self, ctx, server):\r\n\t\tBL = self.BotConfig('BlacklistedServers')\r\n\t\tif int(server) in BL:\r\n\t\t\tBL.remove(int(server))\r\n\r\n\t\tself.BotConfig('BlacklistedServers', BL)\r\n\t\tawait ctx.send('Removed {} from Blacklisted servers'.format(server))", "async def _allowlist_remove(self, ctx: commands.Context, *servers: int):\n async with self.config.allowed() as settings:\n for server in servers:\n if server in settings:\n settings.remove(server)\n return await ctx.tick()", "async def _servers_remove(self, ctx: commands.Context, *servers: int):\n main_servers = await self.config.main_servers()\n allowed = await self.config.allowed()\n async with self.config.user(ctx.author).all() as user_settings:\n if user_settings[\"supporting_in\"]:\n for server in servers:\n if server in user_settings[\"servers\"]:\n if guild := self.bot.get_guild(server):\n if guild.id not in allowed and str(guild.id) not in main_servers.keys():\n await guild.leave()\n user_settings[\"servers\"].remove(server)\n else:\n await ctx.send(f\"`{server}` was not in your BotAccess servers!\")\n return await ctx.tick()\n else:\n return await ctx.send(await self.config.not_supporting() or NOT_SUPPORTING)", "def remove_available_server(self, host_ip):\n\t\tself.swarm_manager.remove_available_server(host_ip)", "def remove_server(self, server):\n assert(isinstance(server, MySQLServer))\n assert(server.group_id == self.__group_id)\n server.group_id = None", "def remove_client(self, client):\n self.clients_list.discard(client)", "def drop_nameservers(self):\n\t\twhile self._ldns_resolver.pop_nameserver():\n\t\t\tpass", "async def unwhitelist(self, ctx, guild: int):\n async with self.config.whitelisted() as w:\n try:\n index = w.index(guild)\n except ValueError:\n return await ctx.send(\"This is not a guild in the whitelist\")\n w.pop(index)\n self._whitelist = w\n await ctx.tick()", "def remove_client(self, name: str):\n print(\"Try to remove\", name)\n for client in self.list_of_server_clients:\n if client.get_name() == name:\n print(\"Deleting to\", name)\n try:\n del self.list_of_server_clients[self.list_of_server_clients.index(client)]\n except Exception as e:\n print('Error trying to remove', client)", "def _removeIgnoredModules(self, moduleNameList):\n\t\ttoRemoveList = []\n\t\tfor fileName in moduleNameList:\n\n\t\t\tfixedFileName = fileName\n\t\t\tif os.path.sep != '/':\n\t\t\t\tfixedFileName = fileName.replace(os.path.sep, '/')\n\t\t\tfor ignoreName in self.ignore:\n\t\t\t\t#if ignoreName == os.path.basename(fileName):\n\t\t\t\tif ignoreName == fixedFileName:\n\t\t\t\t\ttoRemoveList.append(fileName)\n\t\tfor moduleName in toRemoveList:\n\t\t\tmoduleNameList.remove(moduleName)\n\t\treturn moduleNameList", "def remove_server():\n\n if not authorized():\n redirect(url + '/login')\n\n if not admin():\n redirect(url + '/denied')\n\n db.execute(\"DELETE FROM servers WHERE name = '{0}'\".format(request.query.server))\n master_db.commit()\n\n log.execute(\"INSERT INTO events VALUES ('Server {0} removed','Server Removed','{1}','{2}')\"\n 
.format(request.query.server, current_user(), datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")))\n log.execute(\"DELETE FROM servers WHERE name = '{0}'\".format(request.query.server))\n logs_db.commit()\n\n redirect(url + '/')", "async def on_server_remove(self, server: discord.Server):\n log.info('Server %s has been removed', server.name)\n self.server_manager.remove(server)", "def test_remove_controlm_server(self):\n pass", "async def unignore_channel(self, ctx, channel: discord.Channel=None):\r\n current_ch = ctx.message.channel\r\n server = ctx.message.server\r\n if not channel:\r\n if current_ch.id in self.ignore_list[\"CHANNELS\"]:\r\n self.ignore_list[\"CHANNELS\"].remove(current_ch.id)\r\n dataIO.save_json(self._ignore_list_path, self.ignore_list)\r\n await self.bot.say(\"This channel has been removed from the ignore list.\", delete_after=self.settings[server.id][\"delete_delay\"])\r\n else:\r\n await self.bot.say(\"This channel is not in the ignore list.\", delete_after=self.settings[server.id][\"delete_delay\"])\r\n else:\r\n if channel.id in self.ignore_list[\"CHANNELS\"]:\r\n self.ignore_list[\"CHANNELS\"].remove(channel.id)\r\n dataIO.save_json(self._ignore_list_path, self.ignore_list)\r\n await self.bot.say(\"Channel removed from ignore list.\", delete_after=self.settings[server.id][\"delete_delay\"])\r\n else:\r\n await self.bot.say(\"That channel is not in the ignore list.\", delete_after=self.settings[server.id][\"delete_delay\"])", "async def _watignore_channel(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings['ignore_channels']:\n self.settings['ignore_channels'].remove(channel.id)\n await self.bot.say(\"wut? Ok, I will no longer \"\n \"ignore this channel.\")\n else:\n self.settings['ignore_channels'].append(channel.id)\n await self.bot.say(\"wat? Alright, I will ignore \"\n \"this channel.\")\n dataIO.save_json(self.settings_path, self.settings)", "def hostNames(self):\r\n self.serverNames = [x for x in self.serverNames if \\\r\n x.name_type != NameType.host_name]", "def clear_ignore_patterns(self):\n self._ignored_patterns[:] = []", "def remove_ws(self, server_address):\n for ws in self.wsList:\n if ws.url == server_address:\n self.wsList.remove(ws)\n ws.close()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of words between the start and stop word
def count_words_between(start, stop, words):
    word_list = words.lower().split(' ')
    count = 0
    counting = False
    for word in word_list:
        if word == stop.lower():
            return count
        if counting:
            count += 1
        if word == start.lower():
            counting = True
    return "One of the start or stop words was not in the sentence."
[ "def word_count(self):\n\n # Split by non-alphanumerical boundaires\n split_text = re.split('\\W',self.text.lower())\n\n # Count occurences\n counts = {}\n for word in split_text:\n if word:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def wordcount(s):\r\n return len(s.split())", "def get_words_counts(corpus, boundaries):\n counts = Counter()\n cur_w = \"\"\n for i in range(1, len(boundaries)):\n cur_w += corpus[i-1]\n if boundaries[i] == 1:\n counts[cur_w] += 1\n cur_w = \"\"\n return counts", "def count_words(line):\n return len(line.split())", "def count_occurences_in_text(word: str, text: str) -> int:\n\n text = text.lower()\n word = word.lower()\n N = len(word) # length of a searching word\n\n w_count = 0 # number of occurrences\n pos = 0 # starting position of text\n while True:\n n = text.find(word, pos) # index of first occurrence\n if n >= 0:\n pos = n + N\n if (text[n-1:n] in separator or text[n-2:n].__eq__(\"''\")) and (text[pos:pos+1] in separator or text[pos:pos+2].__eq__(\"''\")):\n w_count += 1\n else:\n break\n\n return w_count", "def cmd_wordscount(msg):\n\n global WORDSCOUNT\n\n return state.done(\"Actual word count is: %d words\" % WORDSCOUNT)", "def words_instance_dist(document, words):\n text = document.get_tokenized_text()\n output = []\n count = 0\n start = False\n\n for token in text:\n token = token.lower()\n if not start:\n if token in words:\n start = True\n else:\n count += 1\n if token in words:\n output.append(count)\n count = 0\n return output", "def get_nb_words(doc):\n return len([token for token in doc if (token.pos_ not in IGNORED_POS) and (token.text != \"%\")])", "def count_words(phrase):\n\n word_counts = {}\n\n for word in phrase.split(' '):\n if word in word_counts:\n word_counts[word] += 1\n else:\n word_counts[word] = 1\n\n return word_counts", "def count_word(tick_sequence: str, pattern: str):\r\n return len(re.findall(r\"(?={0})\".format(pattern), tick_sequence))", "def count(string, match, start=None, end=None, case=True):\n if match == \"\":\n return len(string) + 1\n count = 0\n if not start:\n start = 0\n if not end:\n end = len(string)\n if not case:\n string = string.lower()\n match = match.lower()\n while True:\n if string.find(match, start, end) != -1:\n count += 1\n start = string.find(match, start) + len(match)\n else:\n break\n return count", "def count_words(line):\n r = re.findall(\"[a-zA-Z_]+\", line)\n return len(r)", "def filter_wordscount(msg):\n\n global WORDSCOUNT\n\n WORDSCOUNT += len(msg.split())\n\n return state.done(msg)", "def get_word_counts(docs):\n pass", "def get_whole_word_span(tokens, start_index):\n end_index = len(tokens)\n if start_index < len(tokens) - 1:\n end_index = min(start_index + 1, len(tokens) - 1)\n while tokens[end_index].startswith(\"##\"):\n end_index += 1\n if end_index > len(tokens) - 1:\n break\n\n while tokens[start_index].startswith(\"##\") and start_index > 0:\n start_index -= 1\n\n return start_index, end_index", "def get_wordcount(page):\n soup = BeautifulSoup(page.text, 'html.parser')\n words = soup.get_text().split()\n return len(words)", "def total_occurrences(self, haystack, needle, start=0):\n k = 0\n start = haystack.find(needle, start)\n while start >=0:\n k += 1\n start = haystack.find(needle, start+len(needle))\n return k", "def __count_words(self) -> None:\n self.n_words = len(self.data.split())", "def count_words(contents):\n print(\"Count words\")\n counts = {}\n for word in contents:\n word = word.lower()\n word = 
word.translate(str.maketrans(\"\", \"\", string.punctuation))\n if not word in counts:\n counts[word] = 0\n counts[word] += 1\n return counts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws the state of the game to the drawing surface
def draw_game(self):
    self.surface.fill((0, 0, 0))
    self.ship.draw(self.surface)
    self.aliens.draw(self.surface)
    pygame.display.flip()  # update the surface
[ "def draw(self):\n\n if self.finish:\n self.draw_end_screen()\n else:\n pyxel.cls(COL_BACKGROUND)\n self.sparkler.display()\n self.l_paddle.display()\n self.r_paddle.display()\n self.pickups.display()\n self.ball.display()\n self.draw_score()", "def draw(self):\n if self.alive:\n self.surface.set_at((self.x, self.y), self.dude_colour)", "def draw(self) -> None:\n if SHOW_OUTLINE:\n pg.draw.rect(self._screen, RED, self._rect, width=1)\n pg.draw.rect(self._screen, self._bg_color, self._rect)\n pg.draw.rect(self._screen, GRAY, self._rect, width=1)\n for _, sb in self._scoreboxes.items():\n sb.draw()\n\n pg.display.update(self._rect)", "def draw_game_state(self):\n info_0_x, info_0_y = self.coord['info_0_x'], self.coord['info_0_y']\n data = [\n [f'Game ID: {self.game_id}', 10],\n [f'Your Token: {self.access_token}', 10],\n ['-' * 55, 14],\n [f'cards to take: {self.cards_to_take}', 14],\n [f'turns to wait: {self.turns_to_wait}', 14],\n [f'requests: color: {self.requested_color}, value: {self.requested_value}', 14]\n ]\n for index, info in enumerate(data):\n info_y = info_0_y - index * 20\n label = pyglet.text.Label(text=info[0], x=info_0_x, y=info_y,\n color=self.colors['lbl_menu'], font_size=info[1])\n self.draw_objects.append(label)\n\n name = choice(['red_joker.png', 'black_joker.png'])\n if self.requested_value is not None:\n name = f'hearts_{self.requested_value}.png'\n elif self.requested_color is not None:\n name = f'{self.requested_color}_A.png'\n\n card_image = common.resize_center_card_image(self.card_images[name], self.screen.height, 4)\n info_y = info_0_y - 20 * len(data) - card_image.height / 1.9\n card = pyglet.sprite.Sprite(img=card_image, x=info_0_x + card_image.width * 1.3, y=info_y)\n self.draw_objects.append(card)", "def draw(self):\r\n self._clear(self.mainWindow)\r\n self.drawBoard()\r\n self.createControls()\r\n self.createPlayerInfo()\r\n self.createGameLog()", "def draw(self):\n self.mode.draw()", "def drawGame(self):\n # erase the old image\n self.game_image.fill(gui.QColor(0, 0, 0, 0))\n # paint the new image\n painter = gui.QPainter(self.game_image)\n painter.drawImage(0, 0, self.world_image)\n painter.drawImage(0, 0, self.object_image)\n painter.end()", "def on_draw(self):\n self.clear()\n self.arch.draw()\n self.bullet.draw()\n\tfps_display.draw()", "def draw(self):\n self.model._draw_players()\n pygame.display.update()", "def draw_on_screen(self, screen: pygame.Surface):", "def draw(self):\n self.__game.query(self.__color, Player.queryDraw)", "def draw(self):\n self.screen.blit(self.title_image, self.title_rect)\n self.screen.blit(self.subtitle_image, self.subtitle_rect)\n\n self.play_button.draw()\n self.hs_button.draw()", "def Draw(self, screen):\n screen.blit(self.backgroundImage, (0, -150) )\n self.wheel.Draw( screen )\n self.board.Draw(screen)\n self.scoreboard.Draw(screen)", "def draw_state(screen, game_state, valid_positions, selected_square):\n draw_board(screen)\n highlight_selection(screen, game_state, valid_positions, selected_square)\n draw_pieces(screen, game_state.board)", "def draw(self, canvas):\n for state in self.states:\n state.draw(canvas)", "def draw(self):\r\n self.shake()\r\n service.screen.blit(self.image, (self.rect.x, self.rect.y))", "def draw(self):\r\n if not self.ate_apple:\r\n pygame.draw.rect(window, self.RGB, (self.x, self.y, self.width, self.height))", "def draw(self):\n self.scene.clear()\n self.draw_image(self.background)\n\n # shuffle so that we also see blue units\n state = self.simulation.units(self.state)\n for unit in 
state:\n if not unit.is_dead:\n if not unit.is_centurion:\n color = self.gen_color(self.colormap, unit)\n else:\n if unit.side == 0:\n color = QColor(255,0,0)\n else:\n color = QColor(0,0,255)\n self.draw_unit(unit, QPen(), QBrush(color))\n self.draw_unit(state[self.selected_unit], QPen(), QBrush(QColor(0, 255, 0)))", "def redraw(self):\r\n self.drawBoard()\r\n self.createPlayerInfo()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the ship by 'step' (Vector2)
def move(self, step):
    self.position += step * self.speed
[ "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def step(self):\n tmp = self.path[-1].copy()\n tmp += self.direction\n self.path.append(tmp)\n self.update_direction()", "def move(self, duration, xstep, ystep): \n \n #self.enableMotors()\n cmd = ('SM,%d,%d,%d\\r' %(duration, xstep, ystep))\n self.doCommand(cmd)\n #self.disableMotors()\n logger.info('Command sent: move x:%d y:%d in steps' % (xstep, ystep))", "def step(self, x):", "def ship_turn(self, angle):\r\n self.__direction += angle", "def move( self, move_vector ):", "def step(self):\n pos = self.motor.pos() / self.gear_ratio\n print pos\n if abs(pos - self.goal_pos) <= 5:\n print \"holding\"\n self.motor.resetPos()\n self.motor.hold()\n self.at_pos = True\n elif pos > self.goal_pos + 5:\n print \"Turning positive\"\n self.motor.setSpeed(-5)\n elif pos < self.goal_pos - 5:\n print \"Turning negative\"\n self.motor.setSpeed(5)", "def step_forward(self) -> None:\n self._time += self.step_size", "def update(self, time_step):\r\n self.position.propagate(self.velocity, time_step)", "def move(self):\r\n if self.get_direction == c.UP:\r\n return self.macg.step_up()\r\n elif self.get_direction == c.DOWN:\r\n return self.macg.step_down()\r\n elif self.get_direction == c.LEFT:\r\n return self.macg.step_left()\r\n elif self.get_direction == c.RIGHT:\r\n return self.macg.step_right()\r\n elif self.get_direction == c.LEAVE:\r\n exit()", "def increment_step(self):\n self.current_step += 1", "def move(self, dx, dy, *args):\n self.x += dx\n self.y += dy\n return True", "def drive_single_step(self, agent, delta_time):\n self._change_direction(agent, delta_time / 1000.0)\n displacement = vec2d( \n agent._direction.x * agent._vmax * (delta_time),\n agent._direction.y * agent._vmax * (delta_time)) \n agent.prev_pos = vec2d(agent._position)\n agent.position += displacement", "def move(self):\n if random.randint(0, 1) == 0:\n if self.position > self.left_limit:\n self.position -= 1\n\n else:\n if self.position < self.right_limit:\n self.position += 1\n self.steps += 1", "def setup_next_step(self, istep=None):\n if not self._init:\n print '*** Need to init_move before beginning ***'\n return 0\n \n if not istep:\n istep = self.istep+1\n\n if istep > self.nsteps:\n print 'Move Completed'\n return 0\n else:\n ok_limits = True\n current_vals = self.epicsLive.get_all('VAL')\n desired_vals = self.position_dict\n for attr in self.epicsLive._attrs:\n mot = self.epicsLive.get_device(attr)\n desired_pos = self.scan_array[attr][istep]\n tweek_val = self.scan_step[attr]\n current_pos = mot.get_position()\n mot.put('TWV', abs(tweek_val), wait=True)\n mot.put('VELO', abs(tweek_val)/self.step_time, wait=True)\n if desired_pos > current_pos:\n mot.put('HLM', desired_pos+0.1, wait=True)\n mot.put('LLM', current_pos-0.01, wait=True)\n else:\n mot.put('HLM', current_pos+0.01, wait=True)\n mot.put('LLM', desired_pos-0.1, wait=True)\n \n time.sleep(0.2)\n # Make sure next step is within the soft limits \n ok_limits = ok_limits and mot.within_limits(current_pos+tweek_val) \n if not ok_limits:\n print attr, current_pos, desired_pos, desired_pos-current_pos, tweek_val\n print attr, mot.get('HLM'), mot.get('LLM'), current_pos+tweek_val\n print attr, 'step not valid'\n \n # Make sure motor is not at the lower limit switch.\n if mot.get('LLS'):\n print attr, 'is at the lower limit switch!!!'\n print attr, 'step not valid'\n ok_limits = 0 \n\n # Make sure motor is not at the upper limit 
switch.\n if mot.get('HLS'):\n print attr, 'is at the upper limit switch!!!'\n print attr, 'step not valid'\n ok_limits = 0\n \n if not ok_limits:\n self.epicsLive.disable_all()\n self.set_parameter(_move_ready=False)\n else:\n self.set_parameter(_move_ready=True)\n\n return ok_limits", "def move_snake(self, direction):\n self.has_eaten = False\n self.direction = direction\n self.update_tail()\n moved_x, moved_y = self.get_movement(direction)\n \n self.snake_list[0][0] += moved_x\n self.snake_list[0][1] += moved_y\n self.x = self.snake_list[0][0]\n self.y = self.snake_list[0][1]", "def move(self):\n for segment in range(len(self.snake)):\n if segment == len(self.snake) - 1:\n self.snake[segment].forward(20)\n else:\n self.snake[segment].goto(self.snake[segment + 1].pos())", "def move(self, start_point, end_point):\r\n vector = (end_point[0] - start_point[0], end_point[1] - start_point[1], end_point[2] - start_point[2])\r\n self.move_by_vec(vector)", "def step(self, steps):\n if steps == 0:\n self.state = motor_state.STOP\n return\n\n if steps < 0:\n self.state = motor_state.REVERSE\n elif steps > 0:\n self.state = motor_state.FORWARD" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
By chance generate an alien at a random position at the top of 'surface'
def generate_alien(self, surface):
    if random.random() < self.generation_chance:
        size = surface.get_size()
        position = pygame.Vector2(random.randint(0, size[0]), 0)
        self.aliens.append(Alien(position))
[ "def random(self):\r\n if self.ate_apple:\r\n self.x = 20 * random.randint(0, 23)\r\n self.y = 20 * random.randint(3, 23)", "def _randomize_asteroid(self):\n def randomize(vel):\n return vel * rand.choice([1, -1]) * rand.uniform(.5, 2)\n # randomly choose an image\n self.base_img, self.rect = self.fleet.get_random_image()\n # randomize size\n self.scale = rand.uniform(.25, 1.25)\n # randomize velocities\n self.vel_x = randomize(self.base_vel)\n self.vel_y = randomize(self.base_vel)\n self.rotation_vel = randomize(self.base_rotation_vel)", "def rand_location(minimum, maximum):\n return pygame.math.Vector2(rand(minimum, maximum), rand(minimum, maximum))", "def spawn_alien(aliens):\n x = random.choice([0, 100, 200, 300, 400, 500, 600, 700])\n y = random.choice([1, 0]) * 75\n for alien in aliens:\n if alien.x == x and alien.y == y:\n return spawn_alien(aliens)\n return [x, y]", "def random_point(boundary):\n\tpass", "def getRandomPose(Pose): # real signature unknown; restored from __doc__\n pass", "def getRandomPipe(): \r\n pipeHeight = GAME_SPRITES['pipe'][0].get_height() #Seedhe wala yaan ulta can be choosed as they have same height ..say using seedha wala from GAME_SPRITES['pipe'][0] \r\n offset = SCREENHEIGHT / 3 # <-----------------------------------------------------IMP\r\n\r\n y2 = offset + random.randrange(0 , int(SCREENHEIGHT - GAME_SPRITES['base'].get_height() - 1.2*offset)) #y cordinate of lower pipe\r\n pipeX_cordinate = SCREENWIDTH + 10 #It is same for both pipes \r\n y1 = pipeHeight - y2 + offset #y cordinate of upper pipe\r\n\r\n pipe = [\r\n {'x' :pipeX_cordinate ,'y' : -y1}, # upper pipe [0]\r\n {'x' :pipeX_cordinate ,'y' : y2} #lower pipe [1]\r\n ]\r\n\r\n return pipe #Returning the list of co-ordinates of upper and lower pipes\r", "def generate_particle(info):\n\tdim = info['dimension']\n\tsol = info['lower'] + rand(dim) * (info['upper'] - info['lower'])\n\treturn sol", "def generate_random_position():\n return random.randrange(100), random.randrange(100), random.randrange(100)", "def randomrotate(xyz):\n # get random point on unit sphere\n axis = randn(3)\n axis = axis / norm(axis)\n angle = 2 * pi * rand()\n return rotate(axis, angle, xyz)", "def pickDirection():\n turtle.right(random.randrange(-1*MAX_ANGLE(),MAX_ANGLE()))", "def random_move(self):\n\t\toptions = [90, 180, 270]\n\t\tang = randint(0,2)\n\t\tn = randint(2, self.length - 1)\n\t\tself.rotate(n, radians(options[ang]))", "def spice_bloom(self, x=-1, y=-1):\n w, h = self.surface.get_size()\n if x < 0 or x >= w or y < 0 or y >= h:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n r, g, b, a = self.surface.get_at((x, y))\n while (r, g, b) != self.sand_colour and (r, g, b) != self.spice_colour:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n r, g, b, a = self.surface.get_at((x, y))\n if (r, g, b) == self.sand_colour:\n self.surface.set_at((x, y), self.spice_colour)\n else:\n if self.surface.get_at((x+1, y)) == self.sand_colour:\n self.surface.set_at((x+1, y), self.spice_colour)\n if self.surface.get_at((x+1, y+1)) == self.sand_colour:\n self.surface.set_at((x+1, y+1), self.spice_colour)\n if self.surface.get_at((x, y+1)) == self.sand_colour:\n self.surface.set_at((x, y+1), self.spice_colour)\n if self.surface.get_at((x-1, y+1)) == self.sand_colour:\n self.surface.set_at((x-1, y+1), self.spice_colour)\n if self.surface.get_at((x-1, y)) == self.sand_colour:\n self.surface.set_at((x-1, y), self.spice_colour)\n if self.surface.get_at((x-1, y-1)) == self.sand_colour:\n self.surface.set_at((x-1, 
y-1), self.spice_colour)\n if self.surface.get_at((x, y-1)) == self.sand_colour:\n self.surface.set_at((x, y-1), self.spice_colour)\n if self.surface.get_at((x+1, y-1)) == self.sand_colour:\n self.surface.set_at((x+1, y-1), self.spice_colour)", "def __init__(self):\n self.y = random.randint(0, 99)\n self.x = random.randint(0, 99)", "def randomanglerotate(axis, xyz):\n angle = 2 * pi * rand()\n return rotate(axis, angle, xyz)", "def random_pos(self):\n temp = 0\n while temp < len(self.objects):\n line = random.randint(0, 14)\n column = random.randint(0, 14)\n if self.object_position(line, column, self.objects[temp].letter):\n temp += 1", "def random_pipe_pair(pipe_end_img, pipe_body_img):\n surface = pygame.Surface((PIPE_WIDTH, WIN_HEIGHT), SRCALPHA)\n surface.convert() # speeds up blitting\n surface.fill((0, 0, 0, 0))\n max_pipe_body_pieces = int(\n (WIN_HEIGHT - # fill window from top to bottom\n 3 * BIRD_HEIGHT - # make room for bird to fit through\n 3 * PIPE_PIECE_HEIGHT) / # 2 end pieces and 1 body piece for top pipe\n PIPE_PIECE_HEIGHT # to get number of pipe pieces\n )\n bottom_pipe_pieces = randint(1, max_pipe_body_pieces)\n top_pipe_pieces = max_pipe_body_pieces - bottom_pipe_pieces\n # bottom pipe\n for i in range(1, bottom_pipe_pieces + 1):\n surface.blit(pipe_body_img, (0, WIN_HEIGHT - i*PIPE_PIECE_HEIGHT))\n bottom_pipe_end_y = WIN_HEIGHT - bottom_pipe_pieces*PIPE_PIECE_HEIGHT\n surface.blit(pipe_end_img, (0, bottom_pipe_end_y - PIPE_PIECE_HEIGHT))\n # top pipe\n for i in range(top_pipe_pieces):\n surface.blit(pipe_body_img, (0, i * PIPE_PIECE_HEIGHT))\n top_pipe_end_y = top_pipe_pieces * PIPE_PIECE_HEIGHT\n surface.blit(pipe_end_img, (0, top_pipe_end_y))\n # compensate for added end pieces\n top_pipe_pieces += 1\n bottom_pipe_pieces += 1\n return PipePair(surface, top_pipe_pieces, bottom_pipe_pieces)", "def move(self):\r\n if random.random() < 0.5:\r\n self._y = (self._y + 1) % 300\r\n else:\r\n self._y = (self._y - 1) % 300\r\n \r\n if random.random() < 0.5:\r\n self._x = (self._x + 1) % 300\r\n else:\r\n self._x = (self._x - 1) % 300", "def random(position=[0,0],size=[10,10]):\n paint=Paint(position,size)\n return paint" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves all Aliens in this container
def move(self):
    for alien in self.aliens:
        alien.move()
[ "def aliMove(self):\n listr = []\n listl = []\n for row in self._aliens:\n for alien in row:\n if alien != None:\n listr = listr + [alien.right]\n listl = listl + [alien.left]\n self.moveAlien(listr, listl)", "def _move_asteroids(self):\n for asteroid in self.__asteroids:\n asteroid.move(*self.__screen_dimensions)", "def ship_mover(self):\n for ship in self.__ships:\n ship.move()", "def move(self):\r\n # move agents\r\n for agent in self.agents:\r\n agent.move(self.agents)", "def simulate_move(self):\n for atom in self.list_of_atoms:\n atom.move(self.grid)", "def reset_movement(self, owner: str):\n for unit in self.players[owner].units:\n unit.reset_movement()", "def move_asteroids(self):\n list = self.game.get_asteroids()\n for i in range(len(list)):\n x = list[i].get_x_cor()\n y = list[i].get_y_cor()\n self.move_object(list[i])", "def reset(self):\n self.aliases = {}", "def update_swarm(self):\n # move each ant\n for i in range(0, self.num_ants):\n \"\"\" make local choice and move each ant \"\"\"\n # adjust directional bias to the orientation of the ant\n orientation_bias = self.directional_bias(self.ants[i][2])\n # get pheromone bias vector from pheromone field\n pheromone_bias = self.local_pheromone_weights(i)\n # combine biases\n bias = np.multiply(orientation_bias, pheromone_bias)\n # chose the next direction (angle) to move ...\n new_angle = self.weighted_choice(bias)\n # and update the direction of the ant ...\n self.ants[i][2] = new_angle\n # and get the correponding edge vector\n change = self.edge_vectors[new_angle]\n # update the lattice location of the ant\n new_x = self.ants[i][0] + change[0]\n new_y = self.ants[i][1] + change[1]\n # apply toroidal boundary conditions\n self.ants[i][0], self.ants[i][1] = self.apply_bcs(new_x, new_y)", "def new_aliens_collection(positions):\n #aliens = []\n #for (x, y) in positions:\n # aliens.append(Alien(x,y))\n #\n #return aliens\n\n return map(lambda position: Alien(*position), positions)", "def _move_torpedos(self):\n for torpedo in self.__torpedos:\n torpedo.move(*self.__screen_dimensions)", "def move_beads_using_links(beads, links):\n for bead in range(1, len(beads)):\n beads[bead,:] = beads[bead-1,:] + links[bead-1]", "def _move_obstacles(self):\n\n for obstacle_set in self.obstacle_manager:\n for obstacle in obstacle_set:\n obstacle.move()", "def change_fleet_direction(ai_settings, lynels):\n for lynel in lynels.sprites():\n lynel.rect.y += ai_settings.horde_drop_speed\n ai_settings.horde_direction *= -1", "def update_asteroids(self):\n for ast in self.__asteroids:\n # Move asteroid to next position\n self.set_next_position(ast)\n position = ast.position\n self.__screen.draw_asteroid(ast, position[0], position[1])\n\n # Check if we hit a ship, and if yes\n # remove the asteroids, decrease life\n # and show message\n if ast.has_intersection(self.__ship):\n self.__ship.decrease_life()\n self.__screen.remove_life()\n self.__screen.show_message(HIT_TITLE, HIT_MESSAGE)\n self.__asteroids.remove(ast)\n self.__screen.unregister_asteroid(ast)", "def __clearTargets(self):\n log(\"MState __clearTargets\",4)\n for p in self.players:\n p.target = None\n self.mafia_target = None", "def clearAll(self):\n for position in list(self.images):\n self.removePiece(position)", "def _add_aliases(cls,obs,aliases):\n for a in aliases:\n cls._alias_map[a.lower()] = obs.name\n for o in cls._registry.values():\n obs_aliases = []\n for alias, name in cls._alias_map.items():\n if name == o.name:\n obs_aliases.append(alias)\n o._aliases = obs_aliases", "def 
_move_all_audio(self, archive_root, dest_root):\n for directory in self.file_path_filter.filter(\n filter(\n lambda file_like: Path.is_dir(file_like),\n archive_root.iterdir(),\n )\n ):\n dest_dir = dest_root / directory.name\n self._move_audio_in_dir(directory, dest_dir)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw all Aliens in this container
def draw(self, surface):
    for alien in self.aliens:
        alien.draw(surface)
[ "def draw(self):\n for z in Zulu:\n z.shape.draw(z.position,z.color)", "def _draw_asteroids(self):\n for asteroid in self.__asteroids:\n x, y = asteroid.get_coordinates()\n self.__screen.draw_asteroid(asteroid, x, y)", "def draw_all(self):\n pass", "def aliHelper(self):\n list1 = []\n k = 1\n l = 0\n m = 0\n for i in range(ALIEN_ROWS):\n list2 = []\n if k < 2:\n img = ALIEN_IMAGES[2]\n k = k + 1\n elif l < 2:\n img = ALIEN_IMAGES[1]\n l = l + 1\n elif m < 1:\n img = ALIEN_IMAGES[0]\n m = m + 1\n else:\n img = ALIEN_IMAGES[0]\n m = 0\n l = 0\n k = 0\n\n for j in range(ALIENS_IN_ROW):\n alien = Alien(x =((j+1)*ALIEN_H_SEP + (ALIEN_WIDTH / 2) +\n (ALIEN_WIDTH * j)), y = (GAME_HEIGHT - ((ALIEN_CEILING) +\n (ALIEN_HEIGHT / 2) + (i * ALIEN_HEIGHT)+ (ALIEN_V_SEP * i))),\n width = ALIEN_WIDTH, height = ALIEN_HEIGHT, source = img)\n list2 = list2 + [alien]\n t = list2[:]\n list1 = list1 + [t]\n return list1", "def new_aliens_collection(positions):\n #aliens = []\n #for (x, y) in positions:\n # aliens.append(Alien(x,y))\n #\n #return aliens\n\n return map(lambda position: Alien(*position), positions)", "def prep_alien_types(self):\n alien_x = self.screen_rect.centerx - 155\n alien_y = self.subtitle_rect.bottom + self.settings.screen_padding * 2\n\n # Create all alien types\n self.aliens = [None] * 4\n for i in range(4):\n self.aliens[i] = Alien(self.settings, self.screen, i,\n alien_x, alien_y)\n alien_y += self.aliens[0].rect.width + self.settings.screen_padding\n\n self.prep_alien_scores()", "def draw_all_units(self):\n units = self.window.database.MapModel.get_all_units()\n for unit in units:\n self.draw_unit_on_map(unit)", "def drawOverlays(self):\r\n\t\tpass", "async def aliases_command(self, ctx: Context) -> None:\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"• `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )", "def create_alien(ai_settings, screen, aliens, alien_number, row_number):\n if row_number < 2:\n alien_type = 1\n elif row_number < 4:\n alien_type = 2\n else:\n alien_type = 3\n alien = Alien(ai_settings, screen, alien_type)\n alien_width = alien.rect.width\n alien.x = alien_width + 1.25 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 1.25 * alien.rect.height * row_number\n alien.rect.y += int(ai_settings.screen_height / 8)\n aliens.add(alien)", "def draw(self):\n for boid in self.boids:\n boid.draw()", "def _drawYZentities(self):\n pass", "def draw_unit(self, active_units):\n for unit in active_units:\n active_units.draw(self.screen)", "def _draw_all(self) -> None:\n self._draw_player()\n self._draw_world()", "def drawObjects(self):\r\n\t\tpass", "def _create_alien(self, alien_number, row_number):\n alien = Alien(self) # Instantiate alien\n alien_width, alien_height = alien.rect.size # Set alien size\n # set alien horizontal location\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x # set alien horizontal coordinates\n # set alien vertical coordinates\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n self.aliens.add(alien) # add current alien to list of aliens", "def aliases(context, build):\n logger.info(\"Running scout view aliases\")\n adapter = context.obj['adapter']\n \n alias_genes = 
adapter.genes_by_alias(build=build)\n click.echo(\"#hgnc_symbol\\ttrue_id\\thgnc_ids\")\n for alias_symbol in alias_genes:\n info = alias_genes[alias_symbol]\n # pp(info)\n click.echo(\"{0}\\t{1}\\t{2}\\t\".format(\n alias_symbol,\n (alias_genes[alias_symbol]['true'] or 'None'),\n ', '.join([str(gene_id) for gene_id in alias_genes[alias_symbol]['ids']])\n )\n )", "def draw(self, view):\n for i in self._bricks:\n i.draw(view)\n self._paddle.draw(view)\n self._image1.draw(view)\n self._image2.draw(view)\n self._image3.draw(view)\n if self._ball is not None:\n self._ball.draw(view)", "def __init__(self, ai_settings, screen, image):\n super(Alien, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n self.counter = 0\n\n # Load the alien image and set its rect attribute\n self.image = image\n self.rect = self.image.get_rect()\n\n # Start each new alien near the top left of the screen\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n # Store the alien's exact position\n self.x = float(self.rect.x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if 'ship' is in collision with any of the Aliens in this container
def has_collision(self, ship):
    for alien in self.aliens:
        if alien.has_collision(ship):
            return True
    return False
[ "def check_aliens_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n ship_alien_collision = pygame.sprite.spritecollideany(ship, aliens)\n alien_on_the_bottom = alien_on_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)\n if ship_alien_collision or alien_on_the_bottom:\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)", "def InShip(ships, x, y):\n coord = (x, y)\n for ship in ships:\n if coord in ship: \n return True\n return False", "def has_collision(self, ship):\n distance = (self.position - ship.position).length()\n return distance < self.radius + ship.radius", "def gameover(self):\n for ship in self.ships_list:\n for coordinate in ship.ship_coordinates():\n if coordinate not in self.hits_lists:\n return False\n return True", "def ships_overlap(ship1, ship2):\n for ship1_coord in ship1.ship_coordinates():\n for ship2_coord in ship2.ship_coordinates():\n if ship1_coord == ship2_coord:\n return True\n return False", "def _check_bullet_alien_ship_collisions(self):\n # Remove the alien ship if a bullet hits.\n collision = pg.sprite.groupcollide(self.bullets, self.fleet, True, True)\n if collision:\n self.stats.score += self.settings.point\n self.sb.prep_score()\n self.sb.check_high_score()\n\n if not self.fleet:\n self.bullets.empty()\n self._create_fleet()\n self.settings.increase_speed()", "def CheckShipBoundaries(ships):\n ships_copy = list(ships)\n while(len(ships_copy)): # compare each ships coords to each other\n ship = ships_copy.pop() # ships coords.\n\n for acoord in ship:\n for other_ship in ships_copy:\n for bcoord in other_ship:\n a = abs(acoord[0]-bcoord[0]) # Distance on X-axis\n b = abs(acoord[1]-bcoord[1]) # Distance on Y-axis\n\n # same row or column\n if (a==0 and b<2) or (a==0 and b<2):\n return False\n else:\n # distance from a to b calculated by Pythagorus.\n if math.sqrt(a**2 + b**2) < 2:\n return False\n return True", "def _check_alien_ship_hits_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien_ship in self.fleet:\n if alien_ship.rect.bottom >= screen_rect.bottom:\n # Treat as if the ship got hit.\n self._losing_ship_response()\n break", "def check_for_collision(self):\n torpedo_list = self.game.get_torpedos()\n asteroid_list = self.game.get_asteroids()\n for asteroid in asteroid_list:\n for torpedo in torpedo_list:\n if (self.game.intersect(torpedo,asteroid)):\n self.collision(torpedo,asteroid)", "def ship_hits_asteroid(self):\n for each_asteroid in self.__asteroids_list:\n if each_asteroid.has_intersection(self.__ship):\n self._screen.show_message(HIT_TITLE, HIT_MESSAGE)\n self.__ship.reduce_health()\n self._screen.remove_life()\n self._screen.unregister_asteroid(each_asteroid)\n self.__asteroids_list.remove(each_asteroid)", "def is_any_dead(Gameboard):\n\t\tfor ship in Ship.ships:\n\t\t\thit_counter = 0\n\t\t\tif ship.alive:\t\t\t\t\n\t\t\t\tfor i in range(ship.length):\n\t\t\t\t\tif Gameboard.visibleGameboard[ship._shipCoordinatesY[i]][ship._shipCoordinatesX[i]] == \"X\":\n\t\t\t\t\t\thit_counter += 1\n\n\t\t\t\tif hit_counter == ship.length:\n\t\t\t\t\tship.alive = False\n\t\t\t\t\tShip.AliveShips -= 1\n\t\t\t\t\treturn True\n\t\treturn False", "def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Get rid of any remaining aliens and lasers.\n self.aliens.empty()\n self.lasers.empty()\n\n # Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n # 
Pause.\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)", "def _ship_hit(self):\n # livews are still remaining\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1 # decrement number of lilves remaining\n self.sb.prep_ships() # Show how many ships are left.\n \n # Get rid of any remaining aliens and bullets.\n self.aliens.empty() # remove remaining aliens\n self.bullets.empty() # remove remaining bullets\n \n # Create a new fleet and center the ship.\n self._create_fleet() # create a fleet of Instances of alien objects\n self.ship.center_ship() # Center the ship on the screen\n \n # Pause.\n sleep(0.5) # sleep for half a second\n else: # no lives remaining\n self.stats.game_active = False # set game inactive\n pygame.mouse.set_visible(True) # set mouse pointer to visible", "def spritecollideany(sprite, group):\n for _sprite in group:\n if sprite.rect.intersects(_sprite.rect):\n return True\n return False", "def _ship_hit(self):\n\t\tif self.game_stats.ships_left > 0:\n\t\t\t# Reduce ship lives\n\t\t\tself.game_stats.ships_left -= 1\n\t\t\tself.scoreboard.prep_ships()\n\n\t\t\t# Get rid of aliens and remaining bullets\n\t\t\tself.aliens.empty()\n\t\t\tself.bullets.empty()\n\n\t\t\t# Create a new fleet\n\t\t\tself._create_fleet()\n\t\t\tself.ship.center_ship()\n\n\t\t\t# pause game\n\t\t\tsleep(0.5)\n\t\telse:\n\t\t\tself.game_stats.game_active = False\n\t\t\tpygame.mouse.set_visible(True)", "def _check_laser_alien_collisions(self):\n # Remove any lasers and aliens that have collided.\n collisions = pygame.sprite.groupcollide(\n self.lasers, self.aliens,\n self.settings.laser_collide_remove, True\n )\n\n if collisions:\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_points * len(aliens)\n self.sb.prep_score()\n self.sb.check_high_score()\n\n self._start_new_level()", "def can_spawn_ship():\n\n return (\n ( \n (game.turn_number / constants.MAX_TURNS) <= SPAWN_SHIP_TURN\n and \n not game_map[me.shipyard].is_occupied\n and \n not game_map[me.shipyard].position in next_positions\n )\n and\n (\n (\n not stop_spending_halite\n and\n me.halite_amount >= constants.SHIP_COST\n )\n or\n (\n stop_spending_halite\n and\n me.halite_amount >= constants.SHIP_COST + constants.DROPOFF_COST\n\n )\n )\n and\n len(me.get_ships()) <= len(enemies_ships) * 1.75\n and\n halite_collected_ratio < 0.55\n )", "def has_collided(self):\n return any(self._joint_collision) or any(self._cartesian_collision)", "def check_collision(self): \n snake = self.snake.get_locations()\n stones = self.stones.get_locations()\n apples = self.apples.get_locations()\n\n snake_location = snake[1:]\n dead_area = set(snake_location + stones + self.wall)\n if snake[0] in dead_area:\n self.ctx['res_holder']['music'].HIT.play()\n self._set_state('over') \n\n # Check if we ate the apple\n if snake[0] in apples:\n idx = apples.index(snake[0])\n self.snake.update(grow=True)\n self.ctx['res_holder']['music'].POINT.play()\n self.points += 10 # TODO different for other entities\n self.apples.destroy(idx)\n self.apples.create(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if 'ship' is in collision with this alien, False otherwise
def has_collision(self, ship):
    distance = (self.position - ship.position).length()
    return distance < self.radius + ship.radius
[ "def has_collision(self, ship):\n for alien in self.aliens:\n if alien.has_collision(ship):\n return True\n return False", "def InShip(ships, x, y):\n coord = (x, y)\n for ship in ships:\n if coord in ship: \n return True\n return False", "def check_aliens_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n ship_alien_collision = pygame.sprite.spritecollideany(ship, aliens)\n alien_on_the_bottom = alien_on_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)\n if ship_alien_collision or alien_on_the_bottom:\n ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)", "def can_spawn_ship():\n\n return (\n ( \n (game.turn_number / constants.MAX_TURNS) <= SPAWN_SHIP_TURN\n and \n not game_map[me.shipyard].is_occupied\n and \n not game_map[me.shipyard].position in next_positions\n )\n and\n (\n (\n not stop_spending_halite\n and\n me.halite_amount >= constants.SHIP_COST\n )\n or\n (\n stop_spending_halite\n and\n me.halite_amount >= constants.SHIP_COST + constants.DROPOFF_COST\n\n )\n )\n and\n len(me.get_ships()) <= len(enemies_ships) * 1.75\n and\n halite_collected_ratio < 0.55\n )", "def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Get rid of any remaining aliens and lasers.\n self.aliens.empty()\n self.lasers.empty()\n\n # Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n # Pause.\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)", "def gameover(self):\n for ship in self.ships_list:\n for coordinate in ship.ship_coordinates():\n if coordinate not in self.hits_lists:\n return False\n return True", "def ships_overlap(ship1, ship2):\n for ship1_coord in ship1.ship_coordinates():\n for ship2_coord in ship2.ship_coordinates():\n if ship1_coord == ship2_coord:\n return True\n return False", "def _ship_hit(self):\n\t\tif self.game_stats.ships_left > 0:\n\t\t\t# Reduce ship lives\n\t\t\tself.game_stats.ships_left -= 1\n\t\t\tself.scoreboard.prep_ships()\n\n\t\t\t# Get rid of aliens and remaining bullets\n\t\t\tself.aliens.empty()\n\t\t\tself.bullets.empty()\n\n\t\t\t# Create a new fleet\n\t\t\tself._create_fleet()\n\t\t\tself.ship.center_ship()\n\n\t\t\t# pause game\n\t\t\tsleep(0.5)\n\t\telse:\n\t\t\tself.game_stats.game_active = False\n\t\t\tpygame.mouse.set_visible(True)", "def is_collision(self, x, y):\n if x == self.head_cord['x'] and y == self.head_cord['y']:\n return True\n return False", "def _ship_hit(self):\n # livews are still remaining\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1 # decrement number of lilves remaining\n self.sb.prep_ships() # Show how many ships are left.\n \n # Get rid of any remaining aliens and bullets.\n self.aliens.empty() # remove remaining aliens\n self.bullets.empty() # remove remaining bullets\n \n # Create a new fleet and center the ship.\n self._create_fleet() # create a fleet of Instances of alien objects\n self.ship.center_ship() # Center the ship on the screen\n \n # Pause.\n sleep(0.5) # sleep for half a second\n else: # no lives remaining\n self.stats.game_active = False # set game inactive\n pygame.mouse.set_visible(True) # set mouse pointer to visible", "def CheckShipBoundaries(ships):\n ships_copy = list(ships)\n while(len(ships_copy)): # compare each ships coords to each other\n ship = ships_copy.pop() # ships coords.\n\n for acoord in ship:\n for other_ship 
in ships_copy:\n for bcoord in other_ship:\n a = abs(acoord[0]-bcoord[0]) # Distance on X-axis\n b = abs(acoord[1]-bcoord[1]) # Distance on Y-axis\n\n # same row or column\n if (a==0 and b<2) or (a==0 and b<2):\n return False\n else:\n # distance from a to b calculated by Pythagorus.\n if math.sqrt(a**2 + b**2) < 2:\n return False\n return True", "def _check_bullet_alien_ship_collisions(self):\n # Remove the alien ship if a bullet hits.\n collision = pg.sprite.groupcollide(self.bullets, self.fleet, True, True)\n if collision:\n self.stats.score += self.settings.point\n self.sb.prep_score()\n self.sb.check_high_score()\n\n if not self.fleet:\n self.bullets.empty()\n self._create_fleet()\n self.settings.increase_speed()", "def detect_collision(self):\n\n has_collided, obstacle = check_collision(\n self.model.ship, self.model.current_obstacles)\n\n if has_collided:\n self.model.ship.lives -= 1\n self.model.current_obstacles.remove(obstacle)\n\n if not self.model.ship.lives:\n self.model.current_screen = \"Game Over\"", "def is_every_ship_sunk(self):\n\n ships_condition = False if False in [ship.check_status() for ship in self.ships] else True\n\n return ships_condition", "def _check_alien_ship_hits_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien_ship in self.fleet:\n if alien_ship.rect.bottom >= screen_rect.bottom:\n # Treat as if the ship got hit.\n self._losing_ship_response()\n break", "def is_any_dead(Gameboard):\n\t\tfor ship in Ship.ships:\n\t\t\thit_counter = 0\n\t\t\tif ship.alive:\t\t\t\t\n\t\t\t\tfor i in range(ship.length):\n\t\t\t\t\tif Gameboard.visibleGameboard[ship._shipCoordinatesY[i]][ship._shipCoordinatesX[i]] == \"X\":\n\t\t\t\t\t\thit_counter += 1\n\n\t\t\t\tif hit_counter == ship.length:\n\t\t\t\t\tship.alive = False\n\t\t\t\t\tShip.AliveShips -= 1\n\t\t\t\t\treturn True\n\t\treturn False", "def is_attacked_at(self, coord_x: int, coord_y: int) -> Tuple[bool, bool]:\n # Save shot\n self.set_coordinates_previous_shots.add((coord_x, coord_y))\n\n # Check each ship to see if it has been hit\n ship_damages = []\n for s in self.list_ships:\n s.gets_damage_at(coord_x, coord_y)\n ship_hit = (coord_x, coord_y) in s.set_coordinates_damages\n ship_damages.append((ship_hit, s.has_sunk()))\n\n return ship_damages", "def collision(self):\n t = self.currtetri\n a = t.angle / 90\n for i in range(5):\n for j in range(5):\n if t.matrix[a][i][j] and self.grid[i + t.row][j + t.col]:\n return True\n return False", "def has_collided(self):\n return any(self._joint_collision) or any(self._cartesian_collision)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the time frequency and prediction length parameters. This method must be called before being able to use `predict`.
def set_prediction_parameters(self, freq, prediction_length):
    self.freq = freq
    self.prediction_length = prediction_length
[ "def classifierSetVariables(self, setSize, time):\r\n\r\n \tself.prediction = cons.predictionIni\r\n \tself.predictionError = cons.predictionErrorIni\r\n \tself.fitness = cons.fitnessIni\r\n\r\n \tself.numerosity = 1\r\n \tself.experience = 0\r\n \tself.actionSetSize = setSize\r\n \tself.timeStamp = time", "def set_freq_offset(self):\n\n # 1. The frequency of the model is the highest frequency of the Tseries\n self.freq = None\n for tseries in self.tseriesdict.values():\n if not tseries.stress.empty:\n if self.freq is None:\n self.freq = tseries.freq\n else:\n # use the highest frequency\n if get_dt(tseries.freq) < get_dt(self.freq):\n self.freq = tseries.freq\n\n if self.freq is None:\n self.freq = 'D'\n\n # 2. Tseries timestamps should match (e.g. similar hours')\n # calculate frequency and time-difference with default frequency\n time_offsets = set()\n for tseries in self.tseriesdict.values():\n if not tseries.stress.empty:\n # calculate the offset from the default frequency\n time_offset = get_time_offset(tseries.stress.index[0],\n self.freq)\n time_offsets.add(time_offset)\n\n assert len(\n time_offsets) <= 1, 'The time-differences with the default frequency is' \\\n ' not the same for all stresses.'\n if len(time_offsets) == 1:\n self.time_offset = next(iter(time_offsets))\n else:\n self.time_offset = datetime.timedelta(0)", "def set_frequency(self, frequency):\r\n self.obs.centerFreqHz = float(frequency)\r\n self.ref.centerFreqHz = float(frequency)\r\n self.ave.centerFreqHz = float(frequency)\r\n self.hot.centerFreqHz = float(frequency)\r\n self.cold.centerFreqHz = float(frequency)\r\n deltaNu = self.obs.bandwidthHz/float(self.vlen)\r\n n0 = self.obs.centerFreqHz - (self.obs.bandwidthHz/2.)\r\n nu = n0\r\n print(\"Setting Frequency: %10.0f Hz\" % (self.obs.centerFreqHz))\r\n nx = len( self.obs.xdata)\r\n if nx != self.vlen:\r\n self.update_len(self.obs)\r\n for iii in range(self.vlen):\r\n self.obs.xdata[iii] = nu\r\n nu = nu + deltaNu", "def _set_time_frequencies(self, frequencies):\n request = time_freq_support_pb2.TimeFreqSupportUpdateRequest()\n request.time_freq_support.CopyFrom(self._message)\n request.freq_real.CopyFrom(frequencies._message)\n self._stub.Update(request)", "def update_parameters(self, params):\n self.tbf.update_lengthscales(np.exp(params[:self.D])) # update TBF lengthscales\n self.tbf.update_amplitude(np.exp(2*params[self.D])) # update TBF amplitude\n self.var_n = np.exp(2*params[self.D + 1]) # update noise variance\n self.tbf.update_frequencies(params[self.D + 2:]) # update the TBF spectral frequencies", "def updateTimeFactors(self, new_rtf, new_freq, new_dt):\n self.realtime_factor = new_rtf\n self.frequency = new_freq\n\n self.step_size = new_dt", "def setFrequency(self, frequency: int) -> None:\n self.frequency = frequency", "def initialize_parameters(self):\n\n self.n_inputs = len(self.df.columns[:-1])\n self.n_hidden_per_layer = 3\n self.n_hidden = 2\n self.n_outputs = len(self.df.Class.unique()) if self.c_t == \"classification\" else 1\n self.learning_rate = .07\n self.epochs = 3\n self.momentum_factor = .5\n self.performance = 0", "def initialize_time(self):\n self._cur_time = 0\n self._model_timestep = self.sim.model.opt.timestep\n self._control_timestep = 1.0 / self._control_freq", "def test_predict_length():\n\tmodel = pf.GASLLT(data=data, family=pf.GASNormal())\n\tx = model.fit()\n\tx.summary()\n\tassert(model.predict(h=5).shape[0] == 5)", "def setNumberOfTimepoints(self, timepoints):\n\t\tself.length = timepoints", "def fit(self, time: 
np.ndarray, data: np.ndarray, **kwargs) -> dict:", "def set_freq(self, f_obs):\n return _radio_astro_swig.detect_set_freq(self, f_obs)", "def set_parameters(self, parameters: DecisionForestParameters):\n self.parameters = tensor_forest.ForestHParams(\n num_classes=parameters.num_classes,\n num_features=parameters.num_features,\n num_trees=parameters.num_trees,\n max_nodes=parameters.max_nodes,\n inference_tree_paths=parameters.inference_tree_paths\n ).fill()\n\n self.batch_size = parameters.batch_size\n self.use_training_loss = parameters.use_training_loss\n self.report_feature_importances = parameters.report_feature_importances\n self.model_dir = parameters.model_dir", "def set_seq_len(self, context_frames, pred_frames, seq_step):\n self.datasets[\"main\"].set_seq_len(context_frames, pred_frames, seq_step)\n\n # set the seq_len for val_data as well if it's a separate dataset\n if self.is_training_set() and not isinstance(self.val_data, Subset):\n self.val_data.set_seq_len(context_frames, pred_frames, seq_step)\n self.is_ready = True", "def setFreq(self, freq, target_value = None):\n if target_value != None:\n self.freq[ target_value ] = freq\n else:\n self.freq = freq", "def _set_padding_lengths(self, dataset_padding_lengths: Dict[str, int]):\n if not self.use_dynamic_padding and self.num_sentence_words is None:\n self.num_sentence_words = dataset_padding_lengths.get('num_sentence_words', None)\n if not self.use_dynamic_padding and self.num_word_characters is None:\n self.num_word_characters = dataset_padding_lengths.get('num_word_characters', None)", "def test_t_predict_is_length():\n\tmodel = pf.GASLLT(data=data, family=pf.GASt())\n\tx = model.fit()\n\tassert(model.predict_is(h=5).shape[0] == 5)", "def train_sequence_length(self) -> int:\n pass", "def hyperparameter_length_scale(self):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Requests the prediction for the time series listed in `ts`, each with the (optional) corresponding category listed in `cat`.
def predict(self, ts, cat=None, encoding="utf-8", num_samples=100, quantiles=["0.1", "0.75", "0.9"]):
    prediction_times = [x.index[-1]+1 for x in ts]
    req = self.__encode_request(ts, cat, encoding, num_samples, quantiles)
    res = super(DeepARPredictor, self).predict(req)
    return self.__decode_response(res, prediction_times, encoding)
[ "def predict(self, ts, cat=None, encoding=\"utf-8\", num_samples=100, quantiles=[\"0.1\", \"0.5\", \"0.9\"], content_type=\"application/json\"):\n \n prediction_times=[]\n req=[]\n if type(ts)==list:\n prediction_times = [x.index[-1]+pd.Timedelta(1, unit=self.freq) for x in ts]\n req = self.__encode_request(ts, cat, encoding, num_samples, quantiles)\n elif type(ts)==dict:\n \n prediction_times=[]\n target_len=len(ts['target'])\n t0=ts['start']\n prediction_times.append(t0)\n \n \n \n req={\n 'instances': [ts],\n 'configuration': {\"num_samples\": 100, \"output_types\": [\"quantiles\"], \"quantiles\": [\"0.5\"]}#[\"0.1\", \"0.5\", \"0.9\"]}\n }\n req=json.dumps(req).encode('utf-8')\n elif type(ts)==bytes:\n prediction_times=[]\n req=ts\n print(\"IN HERE\")\n prediction_times.append(json.loads(req)['instances'][0]['start'])\n \n res = super(DeepARPredictor, self).predict(req, initial_args={\"ContentType\": content_type})\n \n return self.__decode_response(res, prediction_times, encoding)", "def predict_category(merchant):\n pass", "def catPrediction():\n #import data to predict\n df_pred_cat = pd.read_csv(\"predictCat_df.csv\")\n df_pred_cat = df_pred_cat.drop(['Unnamed: 0.1', 'chosen_match_final'], axis = 1)\n\n #import models\n G_pred_rf_model = pickle.load(open('GBM_Pred_G.txt', 'rb'))\n\n #predict coverage categories\n A_pred = df_pred_cat['A_final']\n B_pred = df_pred_cat['B_final']\n C_pred = df_pred_cat['C_final']\n D_pred = df_pred_cat['D_final']\n E_pred = df_pred_cat['E_final']\n F_pred = df_pred_cat['F_final']\n G_pred = G_pred_rf_model.predict(df_pred_cat)\n\n #input category predictions into dataframe\n df_pred_cat['A_chosen'] = pd.Series(A_pred, index=df_pred_cat.index)\n df_pred_cat['B_chosen'] = pd.Series(B_pred, index=df_pred_cat.index)\n df_pred_cat['C_chosen'] = pd.Series(C_pred, index=df_pred_cat.index)\n df_pred_cat['D_chosen'] = pd.Series(D_pred, index=df_pred_cat.index)\n df_pred_cat['E_chosen'] = pd.Series(E_pred, index=df_pred_cat.index)\n df_pred_cat['F_chosen'] = pd.Series(F_pred, index=df_pred_cat.index)\n df_pred_cat['G_chosen'] = pd.Series(G_pred, index=df_pred_cat.index)\n\n #save dataframe with predictions\n df_pred_cat.to_csv(\"df_predicted_cat.csv\")\n join()", "def _predict_transport_mode_simple_coarse(triplegs_in, categories):\n if not (_check_categories(categories)):\n raise ValueError(\"the categories must be in increasing order\")\n\n triplegs = triplegs_in.copy()\n\n def category_by_speed(speed):\n \"\"\"\n Identify the mode based on the (overall) tripleg speed.\n\n Parameters\n ----------\n speed : float\n the speed of one tripleg\n\n Returns\n -------\n str\n the identified mode.\n \"\"\"\n for bound in categories:\n if speed < bound:\n return categories[bound]\n\n triplegs_speed = get_speed_triplegs(triplegs)\n\n triplegs[\"mode\"] = triplegs_speed[\"speed\"].apply(category_by_speed)\n return triplegs", "def predict(model, ts_test):\r\n n_periods = ts_test.shape[0]\r\n df_dates = model.make_future_dataframe(periods=n_periods, include_history=False)\r\n model_prediction = model.predict(df_dates)\r\n y_pred = model_prediction[['ds', 'yhat']]\r\n y_pred = y_pred.set_index('ds')\r\n y_pred['yhat'] = y_pred['yhat']\r\n return y_pred['yhat']", "def get_model_predictions_by_day(date):", "def predict():\n # pass the song into the lclf object, like before\n\n # now, convert the results into json!\n\n # return the json data to the endpoint.\n return data", "def test_multiple_predict_candidates():\n\n inputs = [{\"SMILES\": \"c1(C=O)cc(OC)c(O)cc1\"}, 
{\"SMILES\": \"C=C\"}]\n vid = 177\n\n prediction_results = client.predict(vid, inputs, method=\"scalar\")\n assert len(prediction_results) == 2\n assert type(prediction_results[0]) == PredictionResult\n assert type(prediction_results[1]) == PredictionResult", "def predict_categories(model: \"Model\", X: pd.DataFrame) -> np.ndarray:\n proba_df, cats_df = model.predict(X)\n return cats_df.category.to_numpy(dtype=\"int32\")", "def accuracy_per_category(pred, label, categories):\n pred, label = list(pred), list(label)\n results = []\n for cat in range(categories):\n vfunc = np.vectorize(lambda x: 1 if x == cat else 0)\n mapped_pred = vfunc(pred)\n mapped_labels = vfunc(label)\n right = float(np.dot(mapped_pred, mapped_labels))\n total = np.sum(mapped_labels)\n if total == 0:\n results.append(0.0)\n else:\n results.append((right / total))\n return results", "def predict_categories(\n title: str, body: str, _settings: Dict[str, Any] = None\n) -> List[Tuple[str, float]]:\n vectorizer: Vectorizer = _CACHE.get(\"vectorizer\")\n if not vectorizer:\n logger.info(\"[CATEGORIES] Models not in cache, loading ...\")\n encoder, vectorizer, urgency, category = load_models()\n _CACHE[\"encoder\"] = encoder\n _CACHE[\"vectorizer\"] = vectorizer\n _CACHE[\"urgency\"] = urgency\n _CACHE[\"category\"] = category\n else:\n encoder: LabelEncoder = _CACHE[\"encoder\"]\n urgency: Classifier = _CACHE[\"urgency\"]\n category: Classifier = _CACHE[\"category\"]\n\n logger.info(\"[CATEGORIES] Tokenizing the ticket ...\")\n vectors = vectorizer(title, body)\n\n logger.info(\"[CATEGORIES] Predicting the ticket's urgency ...\")\n urgency_pred = urgency(vectors)\n u_value, u_cls = urgency_pred[0].max(), urgency_pred[0].argmax()\n\n logger.info(\"[CATEGORIES] Predicting the ticket's category ...\")\n category_pred = category(vectors)\n c_value, c_cls = category_pred[0].max(), category_pred[0].argmax()\n\n decoded = encoder.inverse_transform([c_cls])[0]\n return [(u_cls, u_value), (CATEGORIES[decoded], c_value)]", "def predict(self, ts):\n self._check_is_fitted()\n self._check_is_standardized()\n self._check_data_format(ts)\n\n # Bring input into correct format for model train and prediction\n ts_standardized = self._standardize(ts, locked=True)\n ts_standardized = ts_standardized.astype('float32')\n X = self._sequentialize(ts_standardized)[0]\n\n # Undo standardization for correct scale of predicted values.\n prediction = self._model.predict(X, self.batch_size)\n\n return self._unstandardize(prediction)", "def parse_train_json(self, line, category):\n json_data = re.sub('\\r\\n', '', line)\n jdata = json.loads(json_data)\n num_results = size(jdata['d']['results'])\n #print \"num of results: \", num_results\n if category == self.cat[0]:\n self.num_ent_train += num_results\n for i in xrange(num_results):\n #print i,\n docid = repr(jdata['d']['results'][i]['ID']).strip('\\u')\n title = repr(jdata['d']['results'][i]['Title']).strip('\\u')\n self.ent_train[docid] = [title]\n description = repr(jdata['d']['results'][i]['Description']).strip('\\u')\n self.ent_train[docid].append(description)\n #self.true_train_class[docid]= category\n elif category == self.cat[1]:\n self.num_bus_train += num_results\n for i in xrange(num_results):\n #print i,\n docid = repr(jdata['d']['results'][i]['ID']).strip('\\u')\n title = repr(jdata['d']['results'][i]['Title']).strip('\\u')\n self.bus_train[docid] = [title]\n description = repr(jdata['d']['results'][i]['Description']).strip('\\u')\n self.bus_train[docid].append(description)\n 
#self.true_train_class[docid]= category\n elif category == self.cat[2]:\n self.num_pol_train += num_results\n for i in xrange(num_results):\n #print i,\n docid = repr(jdata['d']['results'][i]['ID']).strip('\\u')\n title = repr(jdata['d']['results'][i]['Title']).strip('\\u')\n self.pol_train[docid] = [title]\n description = repr(jdata['d']['results'][i]['Description']).strip('\\u')\n self.pol_train[docid].append(description)\n #self.true_train_class[docid]= category\n else:\n print \"Can't find the train category: \", category", "def predict_category(model, image):\n prediction = model.predict(image)\n category_sample = np.argmax(prediction)\n category_name_sample = categories[category_sample]\n category_name_sample = category_name_sample.decode('UTF-8').capitalize().replace(\"_\", \" \")\n category_name_sample\n return category_name_sample", "def predict_from_batch(self, batch: pd.DataFrame):\n pass", "def results_cat(self, category: STATS_CATEGORY) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:\n cat = self.category_steps.get(category, [])\n if len(cat) == 1:\n return self.results[cat[0]]\n if len(cat) > 1:\n return {c: self.results[c] for c in cat}\n return {}", "def obtain_class_predictions(yp, activity_dict, activity_to_int_dict, int_to_activity_dict, k=1): \n\n print('Transforming regression predictions to classes')\n\n # Simple approach: use fors and check one by one\n \n def closest_activity(pred, activity_dict):\n min_dist = 100.0\n activity = \"\"\n for key in activity_dict:\n dist = distance.cosine(pred, activity_dict[key])\n if dist < min_dist: \n min_dist = dist\n activity = key\n return activity, min_dist\n \n def closest_k_activities(pred, activity_dict, k):\n activities = [\"!!!\"]*k # Build an array of empty activity names (strings)\n min_dists = [1000000.0]*k # Build an array of distances\n for key in activity_dict: # TODO: Ignore 'None' activities that are in activity_dict (if 'no_nones')\n dist = distance.cosine(pred, activity_dict[key])\n i = 0\n inserted = False\n while i < k and not inserted:\n if dist < min_dists[i]:\n activities.insert(i, key) # The other activities are displaced\n min_dists.insert(i, dist)\n # Remove the last element of the lists\n activities.pop(-1)\n min_dists.pop(-1)\n #activities[i] = key\n #min_dists[i] = dist\n inserted = True \n\n i += 1\n\n return activities, min_dists\n\n ypred = []\n for i in range(len(yp)): \n activities, dists = closest_k_activities(yp[i], activity_dict, k) \n acti_indices = np.full(len(activities), -1)\n i = 0\n for act_name in activities:\n try:\n acti_indices[i] = activity_to_int_dict[act_name]\n except KeyError:\n print(\"Activities: \" + str(activities))\n sys.exit()\n \n i += 1 \n\n ypred.append(acti_indices) \n \n ypred = np.array(ypred) \n unique_act_indices = np.unique(ypred)\n #unique_act_names = [int_to_activity_dict[str(x)] for x in unique_act_indices]\n print(\"Predicted unique activities:\")\n print(unique_act_indices) \n\n return ypred", "def crawl_predict_fast(user_ds, env):\n return crawl_predict(user_ds, env, 1)", "def parse_tags_categories(response):\n responsedatasets = response['hits']['hits']\n tags = []\n categories = []\n for responsedataset in responsedatasets:\n _tags = responsedataset['_source']['tags']\n _categories = responsedataset['_source']['categories']\n for tag in _tags:\n tags.append(tag)\n for category in _categories:\n categories.append(category)\n return tags, categories" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an individual show page.
def parse_show_page(response): # Parse the html soup = BeautifulSoup(response.text) # Find the data on the page venue_el = soup.find('h3').a venue = venue_el.string h4_els = soup.findAll('h4') date_el = h4_els[0] date = date_el.string location_el = h4_els[1] location = location_el.string next_page_url = None next_page_anchors = soup.select('div.nextshow a') if next_page_anchors: next_page_el = next_page_anchors[0] next_page_url = next_page_el.get('href') return {"date": date, "location": location, "venue": venue, "next": next_page_url}
[ "def parse_detail(self, response):\n text = \"\".join(response.css(\".article-body p\")[0].css(\"p *::text\").getall())\n yield {\n \"url\": response.url,\n \"title\": get_clean_investopedia_title(\n response.css(\"h1.article-heading::text\").get().strip()\n ),\n \"text\": text.replace(\"\\xa0\", \" \").strip(),\n }", "def parse_page(soup, movie_id):\n title = soup.find(attrs={'itemprop': 'name'}).string\n alt_title = soup.find(attrs={'itemprop': 'alternateName'}).string\n year = soup.find(name='small').a.string\n genres = list(genre.string for genre in soup.find_all(attrs={'itemprop': 'genre'}))\n countries = list(a.string for a in soup.find(attrs={'class': 'main'}).find_all('a') if not a.get('itemprop'))\n description = soup.find(attrs={'itemprop': 'description'}).contents[0].strip()\n director = soup.find(id='directors').find(attrs={'class': 'person'}).string\n actors = list(actor.string for actor in soup.find(id='actors').find_all(attrs={'class': 'person'}))\n imdb = soup.find(attrs={'class': 'rating'}).string\n tags = 'No tags'\n if soup.find(id='tags'):\n tags = list(tag.string for tag in soup.find(id='tags').find_all('a'))\n poster_link = soup.find(attrs={'class': 'posterbig'}).find(name='img').get('src')\n\n movie_info = {\n 'movie_id': movie_id,\n 'title': title,\n 'alt_title': alt_title,\n 'year': year,\n 'genres': genres,\n 'countries': countries,\n 'description': description,\n 'director': director,\n 'actors': actors,\n 'imdb': imdb,\n 'poster_link': poster_link\n }\n\n if tags is not 'No tags':\n movie_info['tags'] = tags\n\n return movie_info", "def parse_detail(self, response):\n tv_detail = response.selector.xpath('//ul[@class=\"video_info\"]/li')\n item = TVItem()\n tv_id = int(re.search(r'\\d+', response.url).group(0))\n item['id'] = tv_id\n title = response.selector.xpath('//div[@class=\"info-main-title\"]/span/a/text()').extract_first()\n item['name'] = str(title).split(\" \", 1)[0]\n item['season'] = self.get_value_by_key(tv_detail, \"季数:\")\n item['episode_num'] = self.get_value_by_key(tv_detail, \"集数:\")\n item['desc'] = \"\"\n item['episode'] = []\n video_list = response.selector.xpath('//div[@class=\"playlist\"]/a')\n for video in video_list:\n episode = TvEpisode()\n episode['tv_id'] = tv_id\n episode['num'] = video.xpath('./text()').extract_first()\n episode['video_url'] = video.attrib['href']\n item['episode'].append(episode)\n yield item", "def parse_overview_page1(self, response):\n\t\tcomm = response.meta['comm'] # the private/commercial indicator\n\t\t#cityid = response.meta['cityid'] # the id of the city of which we look for the ads (as string)\n\t\t# find the number of pages in total and open all other pages from 1,...,last page\n\t\tif len(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong')) > 1:\n\t\t\tnumpages = int(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong[2]/text()').extract()[0])\n\t\t\tfor pageno in xrange(1,numpages+1):\n\t\t\t\t# we have to re-post our form for the filter settings\n\t\t\t\t#request = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno), 'cityid': cityid},\n\t\t\t\t#\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno)},\n\t\t\t\t\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\t\t\t# find the immoscout ads for this site\n\t\t\t\trequest = 
scrapy.Request('http://www.quoka.de/qs/qpc/xmlSearch.php?search=&view=quoka&platform=desktop&catid=27_2710&maxresults=20&page=' +str(pageno)+\n\t\t\t\t\t\t\t\t\t\t'&output=json&oe=UTF-8', callback=self.parse_immoscout)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\telse:\n\t\t\t# in this case there is no \"Seite 1 von n\", so we simply scrape this page\n\t\t\trequest = scrapy.Request(response.url, callback=self.parse_overview_page2)\n\t\t\trequest.meta['comm'] = comm\n\t\t\tyield request", "def parse_page_details(content, filename=None):\n\n match = re.match('\\s*\\+\\+\\+(.*?)\\+\\+\\+', content, re.M | re.S)\n if not match:\n return content, {}\n\n content = content[match.end():]\n options = load_page_details(match.group(1), filename)\n return content, options", "def fetch_page(name):\n\n params = {\"action\": \"parse\", \"format\": \"json\", \"page\": name}\n rv = requests.get(WIKIMEDIA_API_URL, params=params)\n if rv.status_code != 200:\n print(f\"Unexpected HTTP code: {rv.status_code}\\n{rv}\")\n return None\n\n rv.encoding = \"utf-8\"\n data = rv.json()\n try:\n body = data[\"parse\"][\"text\"][\"*\"]\n title = data[\"parse\"][\"title\"]\n except ValueError:\n print(\"Something is wrong with the server response\")\n raise\n\n return title, body", "def parse_movie(self, response, curr_movie, data_proto, result_list):\n title_section, detail_section = curr_movie\n title = ''.join(title_section.xpath('./text()').extract())\n movie_data_proto = ShowingLoader(response=response)\n movie_data_proto.add_value(None, data_proto.load_item())\n movie_data_proto.add_title(title=title)\n title_list = movie_data_proto.get_title_list()\n if not self.is_movie_crawl(title_list):\n return\n screen_section_list = detail_section.xpath('.//table')\n for curr_screen in screen_section_list:\n self.parse_screen(response, curr_screen,\n movie_data_proto, result_list)", "def parse(self, response):\n theater_list = response.xpath('//li[@class=\"clearfix\"]')\n for theater_element in theater_list:\n cinema_name = theater_element.xpath(\n './p[@class=\"theaterName\"]/a/text()').extract_first()\n data_proto = ShowingLoader(response=response)\n data_proto.add_cinema_name(cinema_name)\n cinema_name = data_proto.get_output_value('cinema_name')\n if not self.is_cinema_crawl([cinema_name]):\n continue\n curr_cinema_url = theater_element.xpath(\n './p[@class=\"theaterName\"]/a/@href').extract_first()\n data_proto.add_cinema_site(\n response.urljoin(curr_cinema_url), cinema_name)\n data_proto.add_value('source', self.name)\n cinema_name_en = curr_cinema_url.split('/')[-1]\n json_url = self.generate_cinema_schedule_url(\n cinema_name_en, self.date)\n request = scrapy.Request(json_url, callback=self.parse_cinema)\n request.meta[\"data_proto\"] = data_proto.load_item()\n yield request", "def parse_movie(self, response, curr_movie, data_proto, result_list):\n title = curr_movie.xpath('./header//h2/text()').extract_first()\n title_en = curr_movie.xpath('./header//p/text()').extract_first()\n movie_data_proto = ShowingLoader(response=response)\n movie_data_proto.add_value(None, data_proto.load_item())\n movie_data_proto.add_title(title=title, title_en=title_en)\n title_list = movie_data_proto.get_title_list()\n if not self.is_movie_crawl(title_list):\n return\n screen_section_list = curr_movie.xpath('./ul')\n for curr_screen in screen_section_list:\n self.parse_screen(response, curr_screen,\n movie_data_proto, result_list)", "def parse_movie(self, response, curr_movie, data_proto, result_list):\n title = 
curr_movie['name']\n movie_data_proto = ShowingLoader(response=response)\n movie_data_proto.add_value(None, data_proto.load_item())\n movie_data_proto.add_title(title=title)\n title_list = movie_data_proto.get_title_list()\n if not self.is_movie_crawl(title_list):\n return\n screen_list = []\n if isinstance(curr_movie['screen'], dict):\n screen_list.append(curr_movie['screen'])\n else:\n screen_list = curr_movie['screen']\n for curr_screen in screen_list:\n self.parse_screen(response, curr_screen,\n movie_data_proto, result_list)", "def parse_line(self, text):\n result = {}\n\n # Using _re_valid_show we will match both the Show and Episode\n show_matches = self._re_valid_show.match(text)\n if show_matches:\n distribution = show_matches.group(1)\n votes = int(show_matches.group(3))\n ratings = float(show_matches.group(4))\n\n show_title = show_matches.group(5)\n show_year = show_matches.group(6)\n\n result = {\n 'type': \"Show\",\n 'show_title': show_title,\n 'year': int(show_year),\n 'ratings': float(ratings),\n 'votes': int(votes),\n 'distribution': distribution\n }\n else:\n # Nothing more to do here\n return {}\n\n # If _re_valid_episode is a match we will add episode information\n episode_matches = self._re_valid_episode.match(text)\n if episode_matches:\n # Change the type from Show to Episode\n result['type'] = \"Episode\"\n\n #episode_details = self.parse_episode(episode_matches.group(1))\n \"\"\"\n The string containing episode details is not nicely formatted by IMDb\n It can be:\n \"episode_title\"\n \"episode_title(#2.3)\"\n \"episode_title(#3)\"\n \"(#2.3)\"\n \"(#3)\"\n \"\"\"\n\n split_results = self._re_episode_season_and_number.split(episode_matches.group(1))\n if len(split_results) == 1:\n # We have only the title\n result['episode_title'] = split_results[0]\n result['season'] = 0\n result['number'] = 0\n elif len(split_results) == 3:\n result[\"episode_title\"] = split_results[0]\n\n dot_split_result = split_results[1].split('.')\n if len(dot_split_result) == 2:\n result['season'] = int(dot_split_result[0])\n result['number'] = int(dot_split_result[1])\n else:\n result['season'] = 1\n result['number'] = int(dot_split_result[0])\n else:\n print(\"parse_episode unexpected split results, original text is: \" + text)\n\n return result", "def parse_site_page(url):\n\n import re\n\n url_request = get_request(url)\n soup = BeautifulSoup(url_request, 'html.parser')\n\n pattern = re.compile(r'entry+')\n div_tags = soup.find_all('div', id=pattern)\n\n return_list = []\n for div in div_tags:\n a_tag = div.find('a')\n name = a_tag.find('h2').text\n link = a_tag.get('href') # link on anime\n\n anime_request = get_request(link)\n anime_soap = BeautifulSoup(anime_request, 'html.parser') # html of anime page\n\n description = anime_soap.find('div', {'class': 'kino-desc full-text clearfix noselect'}).text.replace('\\n', '')\n\n anime_ul = anime_soap.find('ul', {'class': 'kino-lines ignore-select'})\n ul_links = anime_ul.find_all('a')\n genre = ' '.join(a.text for a in ul_links if a.text in GENRES)\n\n rating = anime_soap.find('ul', {'class': 'unit-rating'}).find('li').text\n\n image_url = 'http://baza1.animevost.tv/' + anime_soap.find('a', {'class': 'highslide'}).find('img').get('src')\n\n return_list.append({\n 'name': name,\n 'link': link,\n 'genre': genre,\n 'rating': rating,\n 'description': description,\n 'image': image_url\n })\n\n return return_list", "def load_live_shows(self):\n\n if not self.shows:\n # thanks, obama\n user_agent = (\"Mozilla/5.0 (Windows NT 6.3; rv:36.0) \"\n 
\"Gecko/20100101 Firefox/36.0\"\n )\n headers = {'user-agent': user_agent}\n\n venue_html = requests.get(self.url, headers=headers)\n venue_html.raise_for_status()\n venue_html = venue_html.text\n self.make_shows(venue_html)\n else:\n print(\"Shows list already populated\")", "def parse_page(self, soup):\n posts = self.get_posts(soup)\n out = \"\"\n for post in posts:\n out += self.format_post(post)\n return out", "def show(self, show_id, market=None):\n\n trid = self._get_id(\"show\", show_id)\n return self._get(\"shows/\" + trid, market=market)", "def _ExtractDataFromShowHtml(self, html):\n htmlLines = html.splitlines()\n for count, line in enumerate(htmlLines):\n if line.strip() == r'<pre>':\n startLine = count+1\n if line.strip() == r'</pre>':\n endLine = count\n\n try:\n dataList = htmlLines[startLine:endLine]\n dataString = '\\n'.join(dataList)\n return dataString.strip()\n except:\n raise Exception(\"Show content not found - check EPGuides html formatting\")", "def parse_sidebar(self, manga_page):\n\n try:\n manga_info = super(Manga, self).parse_sidebar(manga_page)\n except media.InvalidMediaError as e:\n raise InvalidMangaError(e.id)\n\n info_panel_first = manga_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')\n\n try:\n volumes_tag = info_panel_first.find(text=u'Volumes:').parent.parent\n utilities.extract_tags(volumes_tag.find_all(u'span', {'class': 'dark_text'}))\n manga_info[u'volumes'] = int(volumes_tag.text.strip()) if volumes_tag.text.strip() != 'Unknown' else None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n chapters_tag = info_panel_first.find(text=u'Chapters:').parent.parent\n utilities.extract_tags(chapters_tag.find_all(u'span', {'class': 'dark_text'}))\n manga_info[u'chapters'] = int(chapters_tag.text.strip()) if chapters_tag.text.strip() != 'Unknown' else None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n published_tag = info_panel_first.find(text=u'Published:').parent.parent\n utilities.extract_tags(published_tag.find_all(u'span', {'class': 'dark_text'}))\n published_parts = published_tag.text.strip().split(u' to ')\n if len(published_parts) == 1:\n # this published once.\n try:\n published_date = utilities.parse_profile_date(published_parts[0])\n except ValueError:\n raise MalformedMangaPageError(self.id, published_parts[0], message=\"Could not parse single publish date\")\n manga_info[u'published'] = (published_date,)\n else:\n # two publishing dates.\n try:\n publish_start = utilities.parse_profile_date(published_parts[0])\n except ValueError:\n raise MalformedMangaPageError(self.id, published_parts[0], message=\"Could not parse first of two publish dates\")\n if published_parts == u'?':\n # this is still publishing.\n publish_end = None\n else:\n try:\n publish_end = utilities.parse_profile_date(published_parts[1])\n except ValueError:\n raise MalformedMangaPageError(self.id, published_parts[1], message=\"Could not parse second of two publish dates\")\n manga_info[u'published'] = (publish_start, publish_end)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n authors_tag = info_panel_first.find(text=u'Authors:').parent.parent\n utilities.extract_tags(authors_tag.find_all(u'span', {'class': 'dark_text'}))\n manga_info[u'authors'] = {}\n for author_link in authors_tag.find_all('a'):\n link_parts = author_link.get('href').split('/')\n # of the form /people/1867/Naoki_Urasawa\n person = self.session.person(int(link_parts[2])).set({'name': author_link.text})\n role 
= author_link.nextSibling.replace(' (', '').replace(')', '')\n manga_info[u'authors'][person] = role\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n serialization_tag = info_panel_first.find(text=u'Serialization:').parent.parent\n publication_link = serialization_tag.find('a')\n manga_info[u'serialization'] = None\n if publication_link:\n link_parts = publication_link.get('href').split('/')\n # of the form /manga/magazine/1/Big_Comic_Original\n manga_info[u'serialization'] = self.session.publication(int(link_parts[3])).set({'name': publication_link.text})\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return manga_info", "def parse_page(\n page: Page, render_function: Callable[[str], str] = None\n) -> Result:\n hosts, date = parse_top_section(page)\n sections = wtp.parse(page.text()).sections\n talks = parse_talks(sections, render_function)\n return {\n \"hosts\": hosts,\n \"date\": date,\n \"talks\": talks\n }", "def show_episodes(self, show_id, limit=50, offset=0, market=None):\n\n trid = self._get_id(\"show\", show_id)\n return self._get(\n \"shows/\" + trid + \"/episodes/\", limit=limit, offset=offset, market=market\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawl the show listings pages. Return structured show info.
def crawl_show_listings(): # Crawl concerts in order, starting with the first show base_url = "http://www.dead.net" next_url = "http://www.dead.net/show/may-05-1965" results = [] while next_url: response, cached = cache_request(next_url) status = response.status_code logging.info("Response %d Cached? %s" % (status, cached)) if status == 200: parsed_result = parse_show_page(response) next_url_relative = parsed_result.pop('next') results.append(parsed_result) if next_url_relative: next_url = base_url + next_url_relative else: next_url = None return results
[ "def parse_show_page(response):\n # Parse the html\n soup = BeautifulSoup(response.text)\n\n # Find the data on the page\n\n venue_el = soup.find('h3').a\n venue = venue_el.string\n\n h4_els = soup.findAll('h4')\n\n date_el = h4_els[0]\n date = date_el.string\n\n location_el = h4_els[1]\n location = location_el.string\n\n next_page_url = None\n next_page_anchors = soup.select('div.nextshow a')\n if next_page_anchors:\n next_page_el = next_page_anchors[0]\n next_page_url = next_page_el.get('href')\n\n return {\"date\": date, \"location\": location, \"venue\": venue, \"next\": next_page_url}", "def arteplus7_listshows(self):\n\n # TODO : rework the shows list a little ?\n return _arteplus7_getshowslist(self.url)", "def load_live_shows(self):\n\n if not self.shows:\n # thanks, obama\n user_agent = (\"Mozilla/5.0 (Windows NT 6.3; rv:36.0) \"\n \"Gecko/20100101 Firefox/36.0\"\n )\n headers = {'user-agent': user_agent}\n\n venue_html = requests.get(self.url, headers=headers)\n venue_html.raise_for_status()\n venue_html = venue_html.text\n self.make_shows(venue_html)\n else:\n print(\"Shows list already populated\")", "def _arteplus7_getshowslist(url):\n # Get the raw page\n # TODO : separate the root URL from the arguments part.\n jsonPage = urllib.request.urlopen(url).read().decode('utf-8')\n\n # Parse the raw page\n j = json.loads(jsonPage)\n\n # The result is nicely formatted.\n #\n # {'duration': 75, 'title': 'Patrice Chéreau, le corps au travail',\n # 'airdate_long': 'dimanche 13 octobre à 14h50', 'image _url':\n # 'http://www.arte.tv/papi/tvguide/images/7676790-CM/W940H530/7676790-CM.jpg',\n # 'video_rights_until': 'Plus que 161 h32', 'url':\n # '/guide/fr/041037-000/patrice-chereau-le-corps-au-travail',\n # 'video_views': '2 242 vues', 'video_channels': 'Arts, Cultures &\n # Spectacles', 'video_rank': 0, 'desc': 'Portrait intime de Patrice\n # Chéreau, artiste à la puissance créatrice inépuisable.'\n results = j[\"videos\"]\n \n # Filter out 'null' values\n return map(lambda res: {k:v for k,v in res.items() if v is not None}, results)", "def find_shows(\n\t\tself,\n\t\tname = None,\n\t\turl = None):\n\n\t\tdef dictionary_to_sql_where(d):\n\t\t\treturn ' AND '.join(list(map(lambda v: v+\" LIKE :\"+v,d)))\n\n\t\tassert name == None or isinstance(name,str)\n\t\tassert url == None or isinstance(url,str)\n\t\tassert (name != None or url != None)\n\n\t\tself.__update_show_cache()\n\n\t\twhere_dict = {}\n\n\t\tif name != None:\n\t\t\twhere_dict['name'] = '%'+name+'%'\n\t\tif url != None:\n\t\t\twhere_dict['url'] = url\n\n\t\tresult_rows = self.sql.execute(\n\t\t\t\t'SELECT name,url FROM show WHERE ' +\n\t\t\t\t\tdictionary_to_sql_where(\n\t\t\t\t\t\twhere_dict),\n\t\t\t\twhere_dict)\n\n\t\tresult_shows = []\n\t\tfor row in result_rows:\n\t\t\tif not row['url'] in self.show_url_to_show:\n\t\t\t\tself.show_url_to_show[row['url']] = Show(\n\t\t\t\t\t\tself.config_file,\n\t\t\t\t\t\tself.sql,\n\t\t\t\t\t\tself.downloader,\n\t\t\t\t\t\tself.html_converter,\n\t\t\t\t\t\tself.xquery_processor,\n\t\t\t\t\t\trow['name'],\n\t\t\t\t\t\trow['url'],\n\t\t\t\t\t\tself.percent_callback_creator)\n\t\t\tresult_shows.append(\n\t\t\t\tself.show_url_to_show[row['url']])\n\n\t\tsjmanager.log.log(\"Got {} shows\".format(len(result_shows)))\n\n\t\treturn result_shows", "def parse(self, response):\n theater_list = response.xpath('//li[@class=\"clearfix\"]')\n for theater_element in theater_list:\n cinema_name = theater_element.xpath(\n './p[@class=\"theaterName\"]/a/text()').extract_first()\n data_proto = 
ShowingLoader(response=response)\n data_proto.add_cinema_name(cinema_name)\n cinema_name = data_proto.get_output_value('cinema_name')\n if not self.is_cinema_crawl([cinema_name]):\n continue\n curr_cinema_url = theater_element.xpath(\n './p[@class=\"theaterName\"]/a/@href').extract_first()\n data_proto.add_cinema_site(\n response.urljoin(curr_cinema_url), cinema_name)\n data_proto.add_value('source', self.name)\n cinema_name_en = curr_cinema_url.split('/')[-1]\n json_url = self.generate_cinema_schedule_url(\n cinema_name_en, self.date)\n request = scrapy.Request(json_url, callback=self.parse_cinema)\n request.meta[\"data_proto\"] = data_proto.load_item()\n yield request", "def fetch_listing_pages():\n # startURL = u\"http://www.daft.ie/ireland/houses-for-rent\"\n startURL = u\"http://www.daft.ie/ireland/houses-for-rent/?s%5Bignored_agents%5D%5B0%5D=5732&s%5Bignored_agents%5D%5B1%5D=428&s%5Bignored_agents%5D%5B2%5D=1551&offset=1960\"\n totalpages = mop_listing_pages(startURL, count = 195)\n print(\"\".join([str(totalpages),\n u\" listing pages saved to disk.\"]).encode('utf-8'))", "def get_episode_links(self, show_name):\n\n self.search_function(show_name)\n\n print (\"Obtaining all season links...\", end =\"\")\n season_links = self.get_season_list()\n print (\"DONE\")\n\n links = []\n\n for title, season in season_links:\n\n print (\"Obtaining episode links for season: \" + title, end =\"...\")\n episode_links = self.get_episode_links_from_season_link(season)\n #for now ignore the link title\n links = links + [link for episode_title, link in episode_links]\n\n print (\"DONE\")\n\n return links", "def shows(self, shows, market=None):\n\n tlist = [self._get_id(\"show\", s) for s in shows]\n return self._get(\"shows/?ids=\" + \",\".join(tlist), market=market)", "def infoshows(sourcename, pattern):\n \n # Checks that the source is supported\n if sourcename not in SOURCES:\n raise UnsupportedSourceError(sourcename)\n \n source = SOURCES[sourcename]\n\n if 'infoshows' not in source.features:\n raise UnsupportedFeatureError(sourcename, 'infoshows')\n\n sourcefeat = source.features['infoshows']\n \n # Pass the pattern\n sourcefeat.pattern = pattern\n \n # Launch the infoshows feature\n shows = sourcefeat.do()\n\n # Print back the shows nicely\n for s in shows:\n print(s['title'])\n print('-' * len(s['title']))\n \n if 'duration' in s:\n print(str(s['duration']) + ' mn', end='')\n \n if 'airdate_long' in s:\n if 'duration' in s:\n print(', ', end='')\n \n print('first aired ' + str(s['airdate_long']), end='')\n \n if 'video_rights_until' in s:\n if 'duration' in s or 'airdate_long' in s:\n print(' ', end='')\n \n print('(available for ' + str(s['video_rights_until']) + ')', end='')\n \n print('')\n \n if 'video_channels' in s:\n print('Tags: ' + s['video_channels'])\n \n if 'url' in s: \n print('Link: ' + 'http://' + source.netloc + s['url'])\n \n print('')\n \n if 'desc' in s:\n print(s['desc'])\n \n print('')\n \n if 'video_views' in s:\n print(str(s['video_views']) + ' views', end='')\n \n if 'video_rank' in s:\n if 'video_views' in s:\n print(' - ', end='')\n \n print('video got rank ' + str(s['video_rank']) + ' on ' + source.netloc, end='')\n \n print('')\n print('')", "def listshows(sourcename):\n \n # Checks that the source is supported\n if sourcename not in SOURCES:\n raise UnsupportedSourceError(sourcename)\n \n source = SOURCES[sourcename]\n\n if 'listshows' not in source.features:\n raise UnsupportedFeatureError(sourcename, 'listshows')\n\n sourcefeat = source.features['listshows']\n\n 
# Launch the listshows feature\n shows = sourcefeat.do()\n\n # Print back the shows nicely\n for s in shows:\n print(s['title'], end='')\n\n if 'airdate_long' in s:\n print( ' /' + str(s['airdate_long']), end='')\n\n if 'duration' in s:\n print( ' (' + str(s['duration']) + ' mn)', end='')\n\n if 'desc' in s:\n if hasattr(sourcefeat, 'desclen'):\n print( ' - ' + s['desc'][:sourcefeat.desclen], end='')\n else:\n print( ' - ' + s['desc'], end='')\n\n # The nice touch...\n if OPTIONS_LISTSHOWS_DESC_LEN< len(s['desc']):\n print('...')\n else:\n print('')", "def get_episodes():\r\n tvshow = \"\"\r\n tvshows = {\r\n \"game of thrones\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=jUJfW_j2DISOvQTrmZHwBA&q=game+of+thrones+episodes&oq=game+o+episodes&gs_l=psy-ab.1.0.0i7i30k1l10.52520.53781.0.55237.6.6.0.0.0.0.362.529.0j1j0j1.2.0....0...1.1.64.psy-ab..4.2.523....0.07UT2XT-nX4\", # noqa\r\n \"castle rock\": \"https://www.google.co.in/search?q=castle+rock+episodes&stick=H4sIAAAAAAAAAONgFuLVT9c3NEw2K8pKL042VkLlakllJ1vpl5QBUXxBUX56UWKuVWpBZnF-SmoxALHeYSM8AAAA&sa=X&ved=2ahUKEwj715fQpMfcAhWGro8KHSK3BIUQMTA5egQIDRBD&biw=1366&bih=662\", # noqa\r\n \"orange is the new black\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=eUNfW5nCEYjlvAS1ja6IDg&q=orange+is+the+new+black+episodes&oq=+oraepisodes&gs_l=psy-ab.3.0.0i7i30k1l3.73181.75732.0.77105.10.10.0.0.0.0.197.1249.0j7.7.0....0...1.1.64.psy-ab..3.6.1070...0i7i10i30k1j0i8i10i30k1j0i67k1.0.KKD0uo55zFc\", # noqa\r\n \"suits\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=1UNfW6mcGcXnvASp-45Y&q=suits+episodes&oq=Sulits+episodes&gs_l=psy-ab.3.0.0i13k1l10.100383.103892.0.105529.8.8.0.0.0.0.294.1276.0j3j3.6.0....0...1.1.64.psy-ab..2.6.1261...0i7i30k1j0i67k1.0.z7eTUNw7kI0\", # noqa\r\n \"the flash\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=RURfW5uVBcfivASXobjAAw&q=the+flash+episodes&oq=theflas+episodes&gs_l=psy-ab.3.0.0i13k1l10.121800.125333.0.127277.9.8.1.0.0.0.246.661.0j1j2.3.0....0...1.1.64.psy-ab..5.4.673...0i7i30k1j0i10k1.0.rNJJNmiWmeI\", # noqa\r\n \"jessica jones\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=0ERfW7u6IY7EvwSa-r-4Dw&q=jessica+jones+episodes&oq=Jess+episodes&gs_l=psy-ab.3.2.0i7i30k1l10.429044.431792.0.433171.4.4.0.0.0.0.285.915.0j2j2.4.0....0...1.1.64.psy-ab..0.4.906....0.bt0PY6CGPJs\", # noqa\r\n \"sherlock\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=ikZfW_B4xeG-BK7Pm7AP&q=sherlock+episodes&oq=sher+episodes&gs_l=psy-ab.3.0.0i7i30k1l10.115543.116200.0.117240.4.4.0.0.0.0.204.759.0j3j1.4.0....0...1.1.64.psy-ab..0.4.746....0.CGkqZHrozHk\", # noqa\r\n \"the fall\": \"https://www.google.co.in/search?ei=rqRgW4ajF4O5rQHXt5jQDA&btnG=Search&q=the+fall+episodes\", # noqa\r\n \"13 reasons why\": \"https://www.google.co.in/search?ei=3qRgW4CLBYX7rQHRvJKYDA&q=13+reasons+why+episodes&oq=13+reasons+why+episodes&gs_l=psy-ab.3...35.7078.0.7552.18.18.0.0.0.0.0.0..0.0....0...1c.1.64.psy-ab..18.0.0....0.VHfUUA_T0WQ\"} # noqa\r\n while tvshow.lower() not in tvshows.keys():\r\n tvshow = input(\"Which tv show you want to know about.\\n\"\r\n \"Please provide the name\\n [The Names are:\"\r\n \"\\nGame of thrones,\\nCastle Rock,\\nOrange Is the\"\r\n \" New Black,\\nSuits,\\nThe Flash,\\nJessica Jones,\"\r\n \"\\nSherlock,\\nThe Fall,\\n13 Reasons Why]\\n\")\r\n if tvshow.lower() not in tvshows.keys():\r\n print(\"Please provide the correct name of the Show\")\r\n else:\r\n tv = tvshows[tvshow.lower()]\r\n print('-'*80)\r\n return tv", "def parse_hotellist_page(html):\n soup = BeautifulSoup(html)\n # Extract 
hotel name, star rating and number of reviews\n hotel_boxes = soup.findAll('div', {'class' :'listing wrap reasoning_v5_wrap jfy_listing p13n_imperfect'})\n if not hotel_boxes:\n print(\"#################################### Option 2 ######################################\")\n hotel_boxes = soup.findAll('div', {'class' :'listing_info jfy'})\n if not hotel_boxes:\n print(\"#################################### Option 3 ######################################\")\n hotel_boxes = soup.findAll('div', {'class' :'listing easyClear p13n_imperfect'})\n\n for hotel_box in hotel_boxes:\n hotel_name = hotel_box.find(\"a\", {\"target\" : \"_blank\"}).find(text=True)\n print(\"Hotel name: %s\" % hotel_name.strip())\n\n stars = hotel_box.find(\"img\", {\"class\" : \"sprite-ratings\"})\n if stars:\n print(\"Stars: %s\" % stars['alt'].split()[0])\n\n num_reviews = hotel_box.find(\"span\", {'class': \"more\"}).findAll(text=True)\n if num_reviews:\n print(\"Number of reviews: %s \" % [x for x in num_reviews if \"review\" in x][0].strip())\n\n # Get next URL page if exists, otherwise exit\n #div = soup.find(\"div\", {\"class\" : \"unified pagination \"})\n div = soup.find(\"div\", {\"class\" : \"pagination paginationfillbtm\"})\n\n # check if this is the last page\n if div.find('span', {'class' : 'guiArw pageEndNext'}):\n print(\"We reached last page\")\n return None\n # If not, return the url to the next page\n hrefs = div.findAll('a', href= True)\n for href in hrefs:\n if href.find(text = True) == '&raquo;':\n print(\"Next url is %s\" % href['href'])\n return href['href']", "def parse_listing(self, response):\n\n # scrape all cards\n for card in response.css('table.listing tbody a[href*=\"cards\"]::attr(href)'):\n card_url = response.urljoin(card.extract())\n yield scrapy.Request(card_url, callback=self.parse_card)\n\n # debug\n # break\n\n # parse the next page (if it exists)\n next_page_links = response.css('li.b-pagination-item a[rel*=\"next\"]::attr(href)').extract()\n if len(next_page_links) > 0:\n yield scrapy.Request(self.url_base + next_page_links[0], callback=self.parse_listing)", "def _GetTitleAndIDList(self):\n # Populate self._allShowList if it does not already exist\n if self._allShowList is None:\n self._GetAllShowList()\n self._ParseShowList()", "def parse_overview_page1(self, response):\n\t\tcomm = response.meta['comm'] # the private/commercial indicator\n\t\t#cityid = response.meta['cityid'] # the id of the city of which we look for the ads (as string)\n\t\t# find the number of pages in total and open all other pages from 1,...,last page\n\t\tif len(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong')) > 1:\n\t\t\tnumpages = int(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong[2]/text()').extract()[0])\n\t\t\tfor pageno in xrange(1,numpages+1):\n\t\t\t\t# we have to re-post our form for the filter settings\n\t\t\t\t#request = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno), 'cityid': cityid},\n\t\t\t\t#\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno)},\n\t\t\t\t\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\t\t\t# find the immoscout ads for this site\n\t\t\t\trequest = scrapy.Request('http://www.quoka.de/qs/qpc/xmlSearch.php?search=&view=quoka&platform=desktop&catid=27_2710&maxresults=20&page=' 
+str(pageno)+\n\t\t\t\t\t\t\t\t\t\t'&output=json&oe=UTF-8', callback=self.parse_immoscout)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\telse:\n\t\t\t# in this case there is no \"Seite 1 von n\", so we simply scrape this page\n\t\t\trequest = scrapy.Request(response.url, callback=self.parse_overview_page2)\n\t\t\trequest.meta['comm'] = comm\n\t\t\tyield request", "def parse_site_page(url):\n\n import re\n\n url_request = get_request(url)\n soup = BeautifulSoup(url_request, 'html.parser')\n\n pattern = re.compile(r'entry+')\n div_tags = soup.find_all('div', id=pattern)\n\n return_list = []\n for div in div_tags:\n a_tag = div.find('a')\n name = a_tag.find('h2').text\n link = a_tag.get('href') # link on anime\n\n anime_request = get_request(link)\n anime_soap = BeautifulSoup(anime_request, 'html.parser') # html of anime page\n\n description = anime_soap.find('div', {'class': 'kino-desc full-text clearfix noselect'}).text.replace('\\n', '')\n\n anime_ul = anime_soap.find('ul', {'class': 'kino-lines ignore-select'})\n ul_links = anime_ul.find_all('a')\n genre = ' '.join(a.text for a in ul_links if a.text in GENRES)\n\n rating = anime_soap.find('ul', {'class': 'unit-rating'}).find('li').text\n\n image_url = 'http://baza1.animevost.tv/' + anime_soap.find('a', {'class': 'highslide'}).find('img').get('src')\n\n return_list.append({\n 'name': name,\n 'link': link,\n 'genre': genre,\n 'rating': rating,\n 'description': description,\n 'image': image_url\n })\n\n return return_list", "def extract_links_from_url(self, url):\n with HTMLSession() as s:\n res = s.get(url, verify=False)\n\n id_list = re.findall(\n u'https://www.meijumi.net/(\\d+)\\.html', res.html.html)\n id_list = list(set(id_list)) # remove duplicates\n print(\"{} unique show id found\".format(len(id_list)))\n shows_extracted = [Show(meijumi_id=_id) for _id in id_list]\n shows_added = self.append_many(shows_extracted)\n return shows_added", "def list_sections(show_id):\n\n # Get show sections\n if show_id in SHOW_SECTIONS:\n sections = SHOW_SECTIONS[show_id]\n else:\n SHOW_SECTIONS[show_id] = content.get_show_sections(SHOWS_BY_ID[show_id])\n sections = SHOW_SECTIONS[show_id]\n\n # Set plugin category. It is displayed in some skins as the name of the current section.\n xbmcplugin.setPluginCategory(_handle, SHOWS_BY_ID[show_id]['title'])\n # Set plugin content. It allows Kodi to select appropriate views for this type of content.\n xbmcplugin.setContent(_handle, 'videos')\n\n # Iterate through sections\n for key, section in sections.items():\n # Create a list item with a text label and a thumbnail image.\n list_item = xbmcgui.ListItem(label=section['title'])\n # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) 
for the list item.\n list_item.setArt({'thumb': section['thumb'],\n 'icon': section['thumb'],\n 'fanart': section['thumb']})\n # Set additional info for the list item.\n # For available properties see the following link:\n # http://mirrors.xbmc.org/docs/python-docs/15.x-isengard/xbmcgui.html#ListItem-setInfo\n list_item.setInfo('video', {'title': section['title']})\n # Create a URL for a plugin recursive call.\n # Example: plugin://plugin.video.example/?action=listing&category=Animals\n url = get_url(action='videos-listing', show=show_id, section=key)\n # is_folder = True means that this item opens a sub-list of lower level items.\n is_folder = True\n # Add our item to the Kodi virtual folder listing.\n xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n # Finish creating a virtual folder.\n xbmcplugin.endOfDirectory(_handle)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of data dicts scraped from dead.net. Returns a list of unique geocodable locations.
def unique_show_locations(listings): listing_geocodable = ['%s, %s' % (listing['venue'], listing['location']) for listing in listings] unique_geocodable = sorted(set(listing_geocodable)) return unique_geocodable
[ "def go_get_data(postcodes,dataset,pathToData=''):\n results = []\n geoAreas = []\n for postcode in postcodes:\n pc = adjustpostcode(postcode)\n pathToData = ''\n conn = lite.connect(pathToData+'geo.db')\n geodb = conn.cursor() \n c_oa = geodb.execute(\"SELECT oa11, lat, long FROM geo WHERE pcd=?;\",(pc,));\n oa = None;\n for r in c_oa:\n results.append({'oa':str(r[0]),'lat':r[1],'lon':r[2],'postcode':postcode})\n geoAreas.append(str(r[0]))\n\n geoAreaslist = ','.join(geoAreas) \n #QS414EW\n #url = \"http://web.ons.gov.uk/ons/api/data/dataset/QS102EW.xml?context=Census&apikey=cHkIiioOQX&geog=2011STATH&diff=&totals=false&dm/2011STATH=%s\" % geoAreaslist\n url = \"http://web.ons.gov.uk/ons/api/data/dataset/%s.xml?context=Census&apikey=cHkIiioOQX&geog=2011STATH&diff=&totals=false&dm/2011STATH=%s\" % (dataset,geoAreaslist)\n response = urllib2.urlopen(url)\n xmlstring = response.read();\n xmlstring = re.sub('(xmlns:[^=]*)=\"[^\"]*\"', '\\\\1=\"_\"', xmlstring)\n root = ET.fromstring(xmlstring);\n \n data_results = {}\n for a in root.findall(\"{_}genericData/{_}DataSet/{_}Group/{_}Series\"):\n loc = a.find(\"{_}SeriesKey/{_}Value[@concept='Location']\")\n if loc is None: \n continue\n location_string = loc.attrib['value']\n if location_string not in data_results:\n data_results[location_string] = []\n for dp in a.findall(\"{_}Obs/{_}ObsValue\"):\n data_string = dp.attrib['value']\n data_results[location_string].append( float(data_string) )\n \n for res in results:\n for i,d in enumerate(data_results[res['oa']]):\n res[dataset+\"_%d\" % i] = d\n #res[dataset] = data_results[res['oa']]\n return results", "def list_locations():", "def locations_outdoors(self, json_data: []) -> ['locations']:\n valid_locations = []\n for data in json_data:\n try:\n if data[25] == 0:\n valid_locations.append(data)\n except:\n pass # Reached a null reference exception, ignores that piece of data\n return valid_locations", "def get_location_list(state_alert_list):\n locations = []\n for item in state_alert_list:\n locations.append([item[\"lat\"], item[\"lon\"]])\n return locations", "def get_all_locations(self, input_df):\n return set(pd.unique(input_df[self._LOCATION_COLUMN_NAME]))", "def get_locations(input_file, output_file):\n with zipfile.ZipFile(input_file, 'r') as z:\n for filename in z.namelist():\n with z.open(filename) as f:\n json_list = json.load(f)\n\n state_locations = _generate_state_dictionary()\n location_dict = {}\n for item in json_list:\n location_dict[item['id']] = re.split(r'[`\\-=~!@#$%^&*()_+\\[\\]{};\\'\\\\:\"|<,./<>?]', item['location'].lower())\n state_flag = 0\n us_flag = 0\n cnt = 0\n for user_id in location_dict:\n for item in location_dict[user_id]:\n for state in state_locations:\n if item == state:\n location_dict[user_id] = state\n state_flag = 1\n break\n if state_flag == 1:\n break\n if state_flag == 0:\n for item in location_dict[user_id]:\n for state in state_locations:\n if item.strip() in state_locations[state]:\n location_dict[user_id] = state\n state_flag = 1\n break\n if state_flag == 1:\n break\n if state_flag == 1:\n state_flag = 0\n else:\n for item in location_dict[user_id]:\n if item.strip() == 'us' or item.strip() == 'usa' or item.strip() == 'united states':\n location_dict[user_id] = 'usa'\n us_flag = 1\n break\n if us_flag == 1:\n us_flag = 0\n else:\n location_dict[user_id] = 'N/A'\n cnt += 1\n\n with open(output_file, 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['user_id', 'location'])\n for data in location_dict:\n writer.writerow([data, 
location_dict[data]])", "def extract_geo_from_tweetlist(tweetlist):\n geolist = []\n for tweet in tweetlist:\n try:\n geo = tweet[\"geo\"]['coordinates']\n geolist.append(geo)\n except TypeError:\n geolist.append(None)\n return geolist", "def return_location_coords(self, json_data: []) -> [str]:\n valid_coords = []\n for data in json_data:\n valid_coords.append((str(data[27]) + ' ' + str(data[28])))\n\n return valid_coords", "def ip_data(self):\r\n geo_data = []\r\n\r\n print(\"Retrieving IP locations from the GeoLite2 data set.\")\r\n for ip in self.raw_ip_data:\r\n try:\r\n d = self.reader.get(ip['IpAddress'])\r\n d['IpAddress'] = ip['IpAddress']\r\n geo_data.append(d)\r\n except ValueError:\r\n continue\r\n except TypeError:\r\n continue\r\n\r\n return geo_data", "def all_locations(self) -> List[Tuple[str, str, str]]:\n query = \"\"\"\n SELECT\n DISTINCT coa_summary_view.site_name,\n coa_summary_view.town,\n coa_summary_view.county\n FROM coa.coa_summary_view\n \"\"\"\n with self.connection as cursor:\n cursor.execute(query)\n return cursor.fetchall()", "def location_list(self):\n \n self._send(\"location_list\")\n return [e2string(x) for x in self._read_json(220)]", "def locations_to_coords(locations):\n coords_final = []\n\n coords_range = 9 if len(locations) >= 10 else len(locations)\n\n for point in range(coords_range):\n coordinates = geolocator.geocode(locations[point])\n if coordinates != None:\n coords_final.append((coordinates.latitude, coordinates.longitude))\n return coords_final", "def getLocationData(self, filepath):\n geoData = []\n files = os.listdir(filepath)\n for afile in files:\n with open(filepath+afile) as r:\n data = json.loads(r.read())\n for entry in data:\n geoData.append(entry)\n return geoData", "def fetch_distinct_locations(self, query_dict):\n cursor = self.connection.cursor()\n query = \"\"\"SELECT DISTINCT geo.location\n FROM `cnx_logger` log\n INNER JOIN `cnx_logger_biomimic_type` biotype\n ON biotype.`biomimic_id`=log.`biomimic_id`\n INNER JOIN `cnx_logger_geographics` geo\n ON geo.`geo_id`=log.`geo_id` \"\"\"\n where_condition = self.build_where_condition(query_dict)\n cursor.execute(query + where_condition + \" ORDER BY 1 ASC\")\n result = cursor.fetchall()\n final_result = [row[0] for row in result]\n cursor.close()\n count_records, min_date, max_date = self.fetch_metadata(query_dict)\n return final_result, count_records, min_date, max_date", "def origin_city_list(self, data):\n\n\t\t\n\t\torigin_location = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\torigin_location1 = node['tir38:OriginLocation'].get_xml_attr('LocationCode')\n\t\t\t\torigin_location.append(str(origin_location1))\n\t\texcept:\n\t\t\torigin_location = [\"N/A\"]\n\n\t\treturn origin_location", "def get_locations_for(username):\n locations = []\n with sql.connect(database_locations) as cur:\n if username == 'admin':\n res = cur.execute(f\"\"\"\n SELECT DISTINCT * \n From Location \n ORDER BY tid, tst DESC;\n \"\"\")\n else:\n res = cur.execute(f\"\"\"\n SELECT DISTINCT * \n From Location\n WHERE tid='{username}' \n ORDER BY tst DESC;\n \"\"\")\n for tid, lon, lat, city, road, _date, _time, tst, in res:\n locations.append([tid, lon, lat, city, road, _date, _time])\n return locations", "def locations():\n status, body = api.locations(city='Delhi')\n return str(body['results'])", "def parse_location_links(doc: BeautifulSoup) -> List[str]:\n # data is 
available at: html > table[id=example]\n datatable = doc.find(id=\"example\")\n if datatable is None:\n logger.warn(\n \"datatable not found, individual location pages will not be fetched\"\n )\n return []\n anchors = datatable.find_all(\"a\")\n urls: List[str] = []\n for a in anchors:\n href = a.attrs[\"href\"]\n if not href:\n continue\n url = urljoin(start_url, href)\n urls.append(url)\n return urls", "def do_get_many_geonames_place_data(gcdroot, options):\n emplaces_rdf = None # Graph created on first loop below\n geonames_ids = get_many_geonames_ids()\n if not geonames_ids:\n return GCD_NO_GEONAMES_IDS\n for geonames_id in geonames_ids:\n try:\n emplaces_rdf = get_geonames_id_data(\n gcdroot, geonames_id, emplaces_rdf=emplaces_rdf\n )\n except Exception as e:\n log.error(\n \"Error getting data for GeoNames Id %s\"%(geonames_id), \n exc_info=True\n )\n get_common_defs(options, emplaces_rdf)\n print(emplaces_rdf.serialize(format='turtle', indent=4), file=sys.stdout)\n return GCD_SUCCESS" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the max item and remove it from the heap. Check that the node is in the correct position and does not violate heap properties.
def removeMax(self): max = self.get_max() #swap last element with root node self.swap(0,self.heap_size-1) #update the size self.heap_size = self.heap_size - 1 #move the root node down the heap to not violate heap properties. self.downHeap(0) return max
[ "def pop(self):\n if len(self._items) == 0:\n raise LookupError('pop from empty heap')\n # else:\n # swap top item with the last item of self._items, and remove it\n _swap(self._items, 0, -1)\n min_item = self._items.pop()\n # now repair the heap property\n _shift_down(self._items, 0, self._less)\n # return\n return min_item", "def del_max(self):\n extracted_max = self.heaplist[0]\n self.heaplist[0] = self.heaplist[-1]\n self.heaplist.pop()\n i = 0\n length = len(self.heaplist)\n while i < length//2:\n l_idx = 2*i + 1\n r_idx = 2*i + 2\n if r_idx > length-1:\n if self.heaplist[i] < self.heaplist[l_idx]:\n temp = self.heaplist[l_idx]\n self.heaplist[l_idx] = self.heaplist[i]\n self.heaplist[i] = temp\n i = l_idx\n else:\n break\n else:\n if (self.heaplist[i] >= self.heaplist[l_idx]) and (self.heaplist[i]>= self.heaplist[r_idx]):\n break\n \n else:\n if self.heaplist[l_idx] == self.heaplist[r_idx]:\n max_idx = r_idx\n val = self.heaplist[r_idx]\n else: \n to_swap = {l_idx: self.heaplist[l_idx], r_idx:self.heaplist[r_idx]} \n max_idx, val = max(to_swap.items(), key = lambda x:x[1])\n self.heaplist[max_idx] = self.heaplist[i]\n self.heaplist[i] = val\n i = max_idx\n \n return extracted_max", "def extract_max(self):\n max_element = self.__heap[1]\n last_element = self.__heap[-1]\n self.heap_size -= 1\n self.__heap[1] = last_element\n self.max_heapify(1)\n self.__heap.pop()\n return max_element", "def dequeue(self): \n #if Priority Queue is empty\n if self.is_empty():\n raise IndexError(\"Deletion is not Possible Because Priority Queue is Empty\")\n else:\n \t#since we are using unsorted array so we have to loop through items to find highest priority element\n \t#find the element with highest priority and delete it from Priority Queue\n highest=self.items[0]\n index=0\n for i in range(len(self.items)):\n \tif self.items[i]>highest:\n \t\thighest=self.items[i]\n \t\tindex=i\n\n del self.items[index] # deleting highest priority element\n return highest", "def remove(self):\n maxi = 0\n for i in range(1, len(self.items)):\n if self.items[i] > self.items[maxi]:\n maxi = i\n item = self.items[maxi]\n del self.items[maxi]\n return item", "def removeMin(self):\r\n if self._heap:\r\n minElem = self.min()\r\n element = self._heap.pop()\r\n # get element at bottom of heap\r\n if len(self._heap) > 0:\r\n element._index = 0\r\n self._heap[0] = element\r\n # swap element at bottom of heap into top\r\n self.bubbleDown(element)\r\n return minElem\r\n else:\r\n return None", "def get(self):\n\n while self.heap:\n priority, node = heapq.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n self.size -= 1\n return node\n raise KeyError('pop from an empty priority queue')", "def remove_min(self) -> object:\n \n # handle empty MinHeap\n if self.is_empty():\n raise MinHeapException\n\n # swap root and last element and pop root off underlying array\n end = self.heap.length() - 1\n self.heap.swap(0, end)\n root = self.heap.pop()\n \n # percolate root element back down to its correct spot\n self.percolate_down(0)\n\n # return popped off former root minimum element\n return root", "def extract_max(S):\n if is_max_heap(S,0):\n ROOT=root(S)\n swap(S,0,heap_size(S)-1)\n del S[-1]\n if not is_max_heap(S,0):\n max_heapify(S,0)\n return ROOT\n else:\n # Max to non max_heaps\n __MAXKEY__=None\n __MAX__=None\n key=0\n for x in S:\n if x > __MAX__:\n __MAX__= x\n __MAXKEY__= key\n\n key+=1\n del S[key]\n return __MAX__", "def remove_min(self): # 5\r\n if self.is_empty():\r\n raise Empty('Priority 
queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def remove_top_item(values, count):\r\n # Save the top item to return later.\r\n result = values[0]\r\n\r\n # Move the last item to the root.\r\n values[0] = values[count - 1]\r\n\r\n # Restore the heap property.\r\n index = 0\r\n while True:\r\n # Find the child indices.\r\n child1 = 2 * index + 1\r\n child2 = 2 * index + 2\r\n\r\n # If a child index is off the end of the tree,\r\n # use the parent's index.\r\n if child1 >= count:\r\n child1 = index\r\n if child2 >= count:\r\n child2 = index\r\n\r\n # If the heap property is satisfied, we're done.\r\n if (values[index] >= values[child1]) and \\\r\n (values[index] >= values[child2]):\r\n break\r\n\r\n # Get the index of the child with the larger value.\r\n if values[child1] > values[child2]:\r\n swap_child = child1\r\n else:\r\n swap_child = child2\r\n\r\n # Swap with the larger child.\r\n values[index], values[swap_child] = values[swap_child], values[index]\r\n\r\n # Move to the child node.\r\n index = swap_child\r\n\r\n # Return the value we removed from the root.\r\n return result", "def get_top_and_heapify(self):\n top_ele = None\n if self.items:\n # put last as first\n self.items[0], self.items[-1] = self.items[-1], self.items[0]\n top_ele = self.items.pop() # now that it is swapped, get the last\n self.heapify(self.items, 0)\n\n return top_ele", "def delete(self, i):\n\t\tif i == len(self.heap.items) - 1:\n\t\t\treturn self.heap.items.pop()\n\t\tdeleted = self.heap.items[i]\n\t\tself.heap.items[i] = self.heap.items.pop()\n\t\tkey = self.heap.eval\n\t\tif i == 1:\n\t\t\tself.heap.heapify_down(i)\n\t\telif key(self.heap.items[i]) < key(self.heap.items[i/2]):\n\t\t\tself.heap.heapify_up(i)\n\t\telse:\n\t\t\tself.heap.heapify_down(i)\n\t\treturn deleted", "def get_largest(stack: Stack) -> int:\n temp = Stack()\n largest = stack.remove()\n temp.add(largest)\n\n while not stack.is_empty():\n item = stack.remove()\n if item > largest:\n largest = item\n temp.add(item)\n\n while not temp.is_empty():\n stack.add(temp.remove())\n\n return largest", "def max_heap_insert(heap, item):\n heap.insert(0, item)\n max_heapify(heap, 0)\n #build_max_heap(heap)", "def remove(self, element):\r\n if element in self._heap:\r\n lastElem = self._heap.pop()\r\n # pop last elemnt in heap\r\n self._heap[element._index] = lastElem\r\n # put last element into where element is\r\n if lastElem < self.getParent(lastElem):\r\n self.bubbleUp(lastElem)\r\n else:\r\n self.bubbleDown(lastElem)\r\n return element\r\n else:\r\n return None", "def max(self) -> Tuple[K, V]:\n if self.is_leaf:\n for (k, v) in zip(reversed(self.keys), reversed(self.vals)):\n if k not in self.deleted:\n return (k, v)\n for child in reversed(self.children):\n if child.size > 0:\n return child.max()", "def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return", "def get_min(self):\n if len(self.nodes) == 0:\n raise IndexError('Heap is empty!')\n \n # Remove and replace min node with last added node\n self.swap_nodes(0, len(self.nodes) - 1)\n min = self.nodes.pop()\n\n self.adjust_heap_down()\n return min" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort the N nodes in the heap. Each removeMax operation takes O(log N) because of downHeap().
def heap_sort(self): tempList = [] #store size of heap size = self.heap_size for i in range(0,size): #call removeMax N times to return max element and remove max every iteration max = self.removeMax() tempList.append(max) #print(max._key,max._value,max._price) for i in range(0,size): self.insert(tempList[i])
[ "def max_heap_sort(heap):\n build_max_heap(heap)\n result=[]\n\n for index in range(heap_size(heap)-1, -1, -1):\n heap[0], heap[-1] = heap[-1], heap[0]\n result += [heap.pop()]\n max_heapify(heap, 0)\n\n return result", "def heap_sort(arr):\n max_heapify(arr)\n for i in range(len(arr) - 1):\n dequeue(arr, len(arr) - 1 - i)\n return arr", "def heapSortNonAscending(A, n):\r\n buildHeapMin(A, n)\r\n size = n\r\n for _ in range(n):\r\n A[0], A[size-1] = A[size-1], A[0]\r\n size -= 1\r\n siftDownMin(A, 0, size)", "def heapsort(lista):\r\n\r\n heapify(lista, len(lista))\r\n end = len(lista)-1\r\n while end > 0:\r\n lista[end], lista[0] = lista[0], lista[end]\r\n end -= 1\r\n sift_down(lista, 0, end)", "def heapify(self):\n n = len(self.storage)\n # Transform bottom-up. The largest idx there's any point to looking at is\n # the largest with a child idx in-range, so must have 2*idx + 1 < n,\n # or idx\n for i in reversed(range(n // 2)):\n self._sift_up(i)", "def djikstra_heap(s=0):", "def heapsort(arr):\n #print(arr)\n # Define heap class instance\n heap = Heap()\n\n # Pass parameter LIST received to heap class instance\n for i in arr: \n heap.insert(i) \n\n # it-is max-heap so : TOP ROOT WILL BE ALWAYS GREATER THAN ALL CHILDREN \n # here delete() function gives largest one so the new formed LIST will be reverse\n for i in range(0, heap.get_size()):\n arr[i] = heap.delete() \n \n # print(arr)\n # as because of max-heap structure need to reverse list to have asecending order-list\n arr.reverse() \n \n return arr", "def heapify(list_, max_=False):\n n = len(list_)\n if max_:\n less = operator.gt\n else:\n less = operator.lt\n for i in reversed(range(n//2)):\n _shift_down(list_, i, less)", "def heap_sort(A):\n hs = HeapSort(A)\n hs.sort()", "def del_max(self):\n extracted_max = self.heaplist[0]\n self.heaplist[0] = self.heaplist[-1]\n self.heaplist.pop()\n i = 0\n length = len(self.heaplist)\n while i < length//2:\n l_idx = 2*i + 1\n r_idx = 2*i + 2\n if r_idx > length-1:\n if self.heaplist[i] < self.heaplist[l_idx]:\n temp = self.heaplist[l_idx]\n self.heaplist[l_idx] = self.heaplist[i]\n self.heaplist[i] = temp\n i = l_idx\n else:\n break\n else:\n if (self.heaplist[i] >= self.heaplist[l_idx]) and (self.heaplist[i]>= self.heaplist[r_idx]):\n break\n \n else:\n if self.heaplist[l_idx] == self.heaplist[r_idx]:\n max_idx = r_idx\n val = self.heaplist[r_idx]\n else: \n to_swap = {l_idx: self.heaplist[l_idx], r_idx:self.heaplist[r_idx]} \n max_idx, val = max(to_swap.items(), key = lambda x:x[1])\n self.heaplist[max_idx] = self.heaplist[i]\n self.heaplist[i] = val\n i = max_idx\n \n return extracted_max", "def repair_heap(array, start_index, heap_size):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('array must be a list')\n\n # Assume current node is max\n max_index = start_index\n left_child_index = 2*start_index+1\n right_child_index = 2*start_index+2\n\n # Check if left child node exists and has higher value than parent node\n if left_child_index < heap_size and \\\n array[left_child_index] > array[max_index]:\n max_index = left_child_index\n\n # Check if right child node exists and has even higher value\n # than both parent and left child node\n if right_child_index < heap_size and \\\n array[right_child_index] > array[max_index]:\n max_index = right_child_index\n\n # Swap values if root is not max\n if max_index != start_index:\n array[max_index], array[start_index] \\\n = array[start_index], array[max_index]\n repair_heap(array, max_index, 
heap_size)\n\n return array", "def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return", "def heapsort(arr):\n pass", "def min_heapify(arr):\n parent = ((len(arr) - 1) - 1) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1", "def _sort(self):\n\t\tfor node in self.nodes_by_size:\n\t\t\tnode.resort()\n\t\tself.nodes_by_size.sort(\n\t\t\tkey=lambda node: node.used / node.capacity,\n\t\t\treverse=True)", "def sort_k(arr: list, n: int, k: int):\n heap = arr[:k + 1]\n heapify(heap)\n \n target_index = 0\n for rem_elmnts_index in range(k + 1, n):\n arr[target_index] = heappop(heap)\n heappush(heap, arr[rem_elmnts_index])\n target_index += 1\n \n while heap:\n arr[target_index] = heappop(heap)\n target_index += 1", "def get_top_nodes(self, n):\n nodes_n_heap = [] #heap\n top_n_nodes = []\n max_page_rank = 0\n for key in self.graph_dict:\n value = self.graph_dict[key]\n if value.get_page_rank() > max_page_rank:\n max_page_rank = value.get_page_rank()\n page_rank_node_name_tuple = (value.get_page_rank(), key)\n if n >= 0: # set heap size to n\n heappush(nodes_n_heap, page_rank_node_name_tuple)\n n -= 1\n else: # keep heap size to n\n heapreplace(nodes_n_heap, page_rank_node_name_tuple)\n heappop(nodes_n_heap) # Make sure we have top n nodes, so we had n+1 in heap so we wont pop the last node.\n self.switch_tuple_items(nodes_n_heap, top_n_nodes)\n print(\"Max page rank for confirmation is: \" + str(max_page_rank))\n return list(reversed(top_n_nodes))", "def sort_children(self):\n self._out_edges = sorted(self._out_edges, key=lambda e: -e.bottom.score, reverse=False)\n self._children_need_sorting = False", "def max_heapify(a, n, idx):\n max_idx = idx\n left = 2*idx + 1\n right = 2*idx + 2\n\n if left < n and a[left] > a[max_idx]:\n max_idx = left\n if right < n and a[right] > a[max_idx]:\n max_idx = right\n\n if max_idx != idx:\n a[max_idx], a[idx] = a[idx], a[max_idx]\n max_heapify(a, n, max_idx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the a,b,c,d variables to the equation at the right spots.
def add_variables(equation, variables): for i, j in enumerate(range(2, 17, 4)): equation[j] = variables[i]
[ "def equation_p(self):\n\t\treturn f\"{self.a}x + {self.b}y + {self.c}z − {self.d} = 0\"", "def add_operations(equation, operations):\n for i, j in enumerate(range(3, 17, 5)):\n equation[j] = operations[i]", "def addEquations(self, node, makeEquations):\n nodeName = node.output[0]\n \n # Get the inputs\n inputName1, inputName2 = node.input\n shape1 = self.shapeMap[inputName1]\n shape2 = self.shapeMap[inputName2]\n self.shapeMap[nodeName] = shape1 \n \n # Decide which inputs are variables and which are constants\n firstInputConstant = False; secondInputConstant = False\n if inputName1 in self.constantMap:\n # Broadcast the constant input1 to the same shape as input2\n input1 = np.copy(self.constantMap[inputName1]) + np.zeros(shape2)\n firstInputConstant = True\n else:\n input1 = self.varMap[inputName1]\n \n if inputName2 in self.constantMap:\n # Broadcast the constant input2 to the same shape as input1\n input2 = np.copy(self.constantMap[inputName2]) + np.zeros(shape1)\n secondInputConstant = True\n else:\n input2 = self.varMap[inputName2]\n \n # The shape after broadcasting must match\n assert input1.shape == input2.shape\n self.shapeMap[nodeName] = shape1\n \n # If both inputs to add are constant, then the output is constant too\n # No new variables are needed, we just need to store the output in constantMap\n if firstInputConstant and secondInputConstant:\n self.constantMap[nodeName] = input1 + input2\n \n # If both inputs are variables, then we need a new variable to represent\n # the sum of the two variables\n elif makeEquations and not firstInputConstant and not secondInputConstant:\n outputVariables = self.makeNewVariables(nodeName)\n input1 = input1.reshape(-1)\n input2 = input2.reshape(-1)\n outputVariables = outputVariables.reshape(-1)\n for i in range(len(input1)):\n e = MarabouUtils.Equation()\n e.addAddend(1, input1[i])\n e.addAddend(1, input2[i])\n e.addAddend(-1, outputVariables[i])\n e.setScalar(0.0)\n self.addEquation(e)\n \n # Otherwise, we are adding constants to variables.\n # We don't need new equations or new variables if the input variable is the output of a linear equation.\n # Instead, we can just edit the scalar term of the existing linear equation.\n # However, if the input variables are not outputs of linear equations (input variables or outputs of \n # activation functions) then we will need new equations.\n elif makeEquations:\n if firstInputConstant:\n constInput = input1\n varInput = input2\n else:\n constInput = input2\n varInput = input1\n constInput = constInput.reshape(-1)\n varInput = varInput.reshape(-1)\n \n # Adjust equations to incorporate the constant addition\n numEquationsChanged = 0\n for equ in self.equList:\n (c,var) = equ.addendList[-1]\n assert c == -1\n if var in varInput:\n ind = np.where(var == varInput)[0][0]\n \n # Adjust the equation\n equ.setScalar(equ.scalar-constInput[ind])\n numEquationsChanged += 1\n \n # If we changed one equation for every input variable, then\n # we don't need any new equations\n if numEquationsChanged == len(varInput):\n self.varMap[nodeName] = varInput\n else:\n # Otherwise, assert no equations were changed, and we need to create new equations\n assert numEquationsChanged == 0\n outputVariables = self.makeNewVariables(nodeName).reshape(-1)\n for i in range(len(outputVariables)):\n e = MarabouUtils.Equation()\n e.addAddend(1, varInput[i])\n e.addAddend(-1, outputVariables[i])\n e.setScalar(-constInput[i])\n self.addEquation(e)", "def add_variable(self, x, y):\n pass", "def polynomiale_2(a: float, b: float, c: 
float, d: float, x: float) -> float:\n return ((((a*x + b) * x) + c) * x) + d", "def eval_formula(self, formula, a, b, c, d):\n if a == \"\": a = 0.0\n if b == \"\": b = 0.0\n if c == \"\": c = 0.0\n if d == \"\": d = 0.0\n try:\n a = float(a)\n except:\n raise ValueError, _(\"'a' value must be a float number\")\n try:\n b = float(b)\n except:\n raise ValueError, _(\"'b' value must be a float number\")\n try:\n c = float(c)\n except:\n raise ValueError, _(\"'c' value must be a float number\")\n try:\n d = float(d)\n except:\n raise ValueError, _(\"'d' value must be a float number\")\n # spaces are erased\n sre.sub(\"[ ]\",\"\",formula)\n # operators and varibles are replaced\n formula = formula.replace(\"+\", \" + \")\n formula = formula.replace(\"-\", \" - \")\n formula = formula.replace(\"*\", \" * \")\n formula = formula.replace(\"/\", \" / \")\n formula = formula.replace(\"^\", \" ** \")\n formula = formula.replace(\"(\", \" ( \")\n formula = formula.replace(\")\", \" ) \")\n formula = formula.replace(\"a\", str(a))\n formula = formula.replace(\"b\", str(b))\n formula = formula.replace(\"c\", str(c))\n formula = formula.replace(\"d\", str(d))\n formula = formula.replace(\"p\", \"3.1415926\")\n _list_formula = formula.split(\" \")\n _formula2 = \"\"\n for oper in _list_formula:\n try:\n _float_oper= str(float(oper))\n _formula2 = _formula2 + _float_oper\n except ValueError:\n _formula2 = _formula2 + oper\n _g = {\"__builtins__\":{}}\n try:\n return eval(_formula2, _g)\n except:\n raise ValueError, _(\"Invalid formula\")", "def test_get_vars(self):\r\n size = (5, 4)\r\n x = create_var(size)\r\n y = create_var(size)\r\n A = create_const(np.ones(size), size)\r\n # Expanding dict.\r\n add_expr = sum_expr([x, y, A])\r\n vars_ = get_expr_vars(add_expr)\r\n self.assertItemsEqual(vars_, [(x.data, size), (y.data, size)])", "def stickel_method(U: Set[Equation], ac_symbol: Function) -> SubstituteTerm:\n # Gather all variables for fresh var calculation\n ALL_VARS = vars_from_equations(U)\n original_from_generalized : Dict[Variable, Term] = dict()\n\n def generalize_term(t: Term) -> Variable:\n \"\"\"\n Returns a generalized variable for every\n term that's not a variable.\n \"\"\"\n vt = t\n if isinstance(t, Variable):\n original_from_generalized[t] = t\n else:\n vt = None\n for gen_var, og_term in original_from_generalized.items():\n if t == og_term:\n vt = gen_var\n break\n if vt is None:\n vt = fresh_variable(ALL_VARS)\n ALL_VARS.add(vt)\n original_from_generalized[vt] = t\n return vt\n\n var_count = Counter()\n # Go through each equation\n for e in U:\n LS, RS = flatten_equation(e, ac_symbol)\n # print(\"LS\", LS)\n # print(\"RS\", RS)\n\n # Generalize left and right sides\n LS_VARS = [generalize_term(t) for t in LS]\n RS_VARS = [generalize_term(t) for t in RS]\n\n # Calculate multiplicity\n VARS_IN_EQ = set(LS_VARS).union(set(RS_VARS))\n for x in VARS_IN_EQ:\n num = LS_VARS.count(x) - RS_VARS.count(x)\n var_count[x] += num\n\n # Create the equation with variable coeficients\n # being the counts above\n sympy_expression = 0\n var_map: Dict[sympy.core.Symbol, Variable] = dict()\n for x, count in var_count.items():\n # Construct Z3 variable\n sympy_var = symbols(x.symbol + \"_0\", integer=True, positive=True)\n var_map[sympy_var] = x\n\n # Construct part of expression\n sympy_expression += count * sympy_var\n\n\n # Determine the ordering of the diophantine solver output\n sympy_ordering = list(sympy_expression.expand(force=True).free_symbols)\n sympy_ordering.sort(key=default_sort_key)\n\n # 
Solve diophantine equation\n # print(original_from_generalized)\n # print(sympy_expression)\n basis_vector = diop_linear(sympy_expression)\n basis_tables = generate_basis_table(basis_vector)\n\n sigma = False\n while not sigma:\n # Generate the basis table\n basis_table = next(basis_tables)\n # print(basis_table)\n\n # Create variables representing each row\n row_vars = n_fresh_variables(ALL_VARS, len(basis_table))\n ALL_VARS = ALL_VARS.union(set(row_vars))\n\n # Craft intermediate substitution from basis table\n sub_basis: Dict[Variable, Term] = dict()\n for column, sympy_var in enumerate(sympy_ordering):\n term = None\n for i, row in enumerate(basis_table):\n if row[column] == 0:\n continue\n row_var = row_vars[i]\n for _ in range(row[column]):\n if term is None:\n term = row_var\n else: # z_2 + z_4\n term = ac_symbol(term, row_var)\n sub_basis[var_map[sympy_var]] = term\n\n # [TODO] [IN PROGRESS] Unify variables in the generalized terms with\n # their counterparts in the original terms.\n # print(sub_basis)\n new_eqs = set()\n for gen_var, basis_var in sub_basis.items():\n rhs = original_from_generalized[gen_var]\n new_eqs.add(Equation(\n basis_var,\n rhs\n ))\n sigma = syntactic_unification(new_eqs)\n\n\n # Currently returning one posisble unifier but we can keep generating\n # using the basis vector\n return {sigma}", "def test_equation_rewrite(self):\n variables = {}\n variables['x'] = PysolveVariable('x')\n variables['y'] = PysolveVariable('y')\n self.assertEqual('x - y', _rewrite(variables, {}, 'x - y'))\n self.assertEqual('xx - y', _rewrite(variables, {}, 'xx - y'))\n self.assertEqual('xx - yx', _rewrite(variables, {}, 'xx - yx'))\n self.assertEqual('xx(0) - yx', _rewrite(variables, {}, 'xx(0) - yx'))\n self.assertEqual('_series_acc(x,-1)',\n _rewrite(variables, {}, 'x(-1)'))\n self.assertEqual('_series_acc(x,-t)',\n _rewrite(variables, {}, 'x(-t)'))\n\n parameters = {}\n parameters['a'] = Parameter('a')\n parameters['b'] = Parameter('b')\n self.assertEqual('_series_acc(a,-1)',\n _rewrite({}, parameters, 'a(-1)'))", "def polynomiale(a: float, b: float, c: float, d: float, x: float) -> float:\n return a*x*x*x + b*x*x + c*x + d", "def register_equations(self, **kwargs):\n # -------\n # Aliases\n # -------\n\n m = self.m\n a = self.aqua\n\n # ----------\n # Parameters\n # ----------\n\n # Growing bed definition\n beds = kwargs.get('beds', [(0, 30)])\n\n # --------------------\n # Connecting Variables\n # --------------------\n\n T = a.T\n I = a.I # noqa\n N = a.N\n ppb = a.ppb\n w = a.w\n dNup = a.dNup\n\n # ---------------------------\n # Equations and Intermediates\n # ---------------------------\n\n time = m.SV(value=0)\n m.timevar = time\n m.Equation(time.dt() == 1)\n\n bed_models = [\n (PlantBed(), plant_day, harvest_day)\n for plant_day, harvest_day in beds\n ]\n bed_vars = [\n bed.register_equations(\n m, plant_day, harvest_day, time, T, I, N, **kwargs\n )\n for bed, plant_day, harvest_day in bed_models\n ]\n\n m.Equation(w == ppb * sum([var[0] for var in bed_vars]))\n m.Equation(dNup == ppb * sum([var[1] for var in bed_vars]))", "def equation_pxyz(self):\n\t\treturn f\"{self.vert} = {self.d}\"", "def equation_value(self):\n D = self.b ** 2 - 4 * self.a * self.c\n solution = []\n if D > 0:\n solution.append((-self.b + pow(self.b**2 - 4 * self.a * self.c, 0.5)) / 2 * self.a)\n solution.append((-self.b - pow(self.b**2 - 4 * self.a * self.c, 0.5)) / 2 * self.a)\n elif D == 0:\n solution.append(-self.b / (2 * self.a) + 0j)\n else:\n solution.append((-self.b + pow(self.b**2 - 4 * 
self.a * self.c, 0.5)))\n solution.append((-self.b + pow(self.b**2 - 4 * self.a * self.c, 0.5)))\n return solution", "def polynomiale(a:int, b:int, c:int, d:int, x: float) -> float:\n return a * pow(x, 3) + b * pow(x, 2) + c * x + d", "def add_brackets(equation, brackets):\n for pos, brace in brackets.items():\n equation[pos] = brace", "def update_parameters_adagrad(self, grads,adagrads, learning_rate=1.2, reg_term=0, m = 1):\r\n\r\n for i in range(len(self.w)):\r\n\r\n self.w[i] = (1-reg_term/m) * self.w[i] - (learning_rate / (np.sqrt(adagrads[\"dW\" + str(i + 1)]) + 0.000000001)) * grads[\"dW\" + str(i + 1)]\r\n self.b[i] = (1-reg_term/m) * self.b[i] - (learning_rate / (np.sqrt(adagrads[\"db\"+str(i+1)]) + 0.000000001)) * grads[\"db\" + str(i + 1)]\r\n self.set_parameters_internal()\r\n\r\n return self.parameters", "def __add__(self,vect):\r\n \r\n x = self.x + vect.x\r\n y = self.y + vect.y\r\n z = self.z + vect.z\r\n \r\n return vecteur(x,y,z)", "def add_diffraction_grid(self,D, a, Nx, Ny):\n\n E0 = np.copy(self.E)\n Ef = 0\n \n b = D-a\n width, height = Nx*a + (Nx-1)*b , Ny*a + (Ny-1)*b\n x0 ,y0 = -width/2 , height/2\n \n x0 = -width/2 + a/2\n for i in range(Nx):\n y0 = height/2 - a/2\n for j in range(Ny):\n \n Ef += np.select( [((self.xx > (x0 - a/2) ) & (self.xx < (x0 + a/2) )) & ((self.yy > (y0 - a/2) ) & (self.yy < (y0 + a/2) )), True], [E0, 0])\n y0 -= D\n x0 += D \n self.E = Ef", "def __add__(self, b):\n res = FourMomentum(\n (self.px + b.px,\n self.py + b.py,\n self.pz + b.pz,\n self.e + b.e),\n 'x,y,z,e'\n )\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the operations to the equation at the right spots.
def add_operations(equation, operations):
    for i, j in enumerate(range(3, 17, 5)):
        equation[j] = operations[i]
[ "def quad_add_oper(self, oper):\n\n if IdleCompiler.__should_gen_quads:\n IdleCompiler.__interp.add_operator(oper)", "def apply_operators(operators, expression):\n\n i = 1\n while i < len(expression) - 1:\n\n if expression[i] in operators:\n operator = expression[i]\n op1 = expression[i - 1]\n op2 = expression[i + 1]\n\n # Apply the operation between the previous and following values\n if operator == '+':\n res = op1 + op2\n elif operator == '-':\n res = op1 - op2\n elif operator == '*':\n res = op1 * op2\n elif operator == '/':\n res = op1 / op2\n else:\n raise Exception(\"apply_operator() should only be called with valid operators!\")\n\n # Replace the 3 items (op1, operator, op2) with the operation result\n expression[i-1] = res\n del expression[i+1]\n del expression[i]\n\n else:\n i += 1 # Increment index", "def arithmetic_expression(self, symbol_table):\n if not hasattr(self, 'operator') and hasattr(self.op, '_tx_fqn'):\n return self.op.evaluate(self.op, symbol_table)\n if not hasattr(self, 'operator'):\n return self.op\n if len(self.operator) == 0:\n operand =self.op.pop(0)\n return operand.evaluate(operand, symbol_table)\n else:\n operator = self.operator.pop(0)\n op = self.op.pop(0)\n operand = op.evaluate(op, symbol_table)\n return symbol_table[operator](\n operand,\n self.evaluate(self, symbol_table)\n )", "def math_operation_reverse_precedence(expression: str) -> str:\n elements = expression.split()\n addition_evaluated = []\n final = 1\n for index, value in enumerate(elements):\n if value == \"*\":\n addition_evaluated.append(value)\n elif index == 0:\n addition_evaluated.append(int(value))\n elif index % 2 == 0 and index >= 2 and elements[index - 1] == \"+\":\n if addition_evaluated[-1] in [\"+\", \"*\"]:\n addition_evaluated.append(int(value))\n else:\n addition_evaluated[-1] += int(value)\n elif addition_evaluated[-1] == \"*\":\n addition_evaluated.append(int(value))\n for index, value in enumerate(addition_evaluated):\n if index == 0:\n final *= int(value)\n if index % 2 == 0 and index >= 2 and addition_evaluated[index - 1] == \"*\":\n final *= int(value)\n return str(final)", "def apply_operators(e):\n e = e.expand()\n muls = e.atoms(Mul)\n subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]\n return e.subs(subs_list)", "def write_arithmetic(self, op):\n self.write_vm_cmd(op)", "def operate(term1: int, term2: int, op: str) -> int:\n if op == '+':\n return term1 + term2\n elif op == '*':\n return term1 * term2\n else:\n raise ValueError", "def calculate(self, op, a, b):\n if op == \"+\":\n return a + b\n elif op == \"-\":\n return a - b\n elif op == \"*\":\n return a * b\n elif op == \"/\":\n return a / b", "def compile_expression(self) -> None:\n\n # first term\n self.compile_term()\n\n peek_at_token = self.tokenizer.peek_at_next_token()[1]\n\n while peek_at_token in BINARY_OPERATORS:\n # binary op\n self.tokenizer.advance()\n operation = self.tokenizer.get_current_token()[1]\n\n # expression\n self.tokenizer.advance()\n\n # compile term\n self.compile_term()\n\n arithmetic_command = BINARY_DICT[peek_at_token]\n self.VMWriter.write_arithmetic(arithmetic_command)\n\n # renew again\n peek_at_token = self.tokenizer.peek_at_next_token()[1]", "def calc(self, n1, op, n2):\n if op == \"*\":\n return n1 * n2\n if op == \"/\":\n return n1 // n2\n if op == \"+\":\n return n1 + n2\n if op == \"-\":\n return n1 - n2", "def advance_operation(self):\n# self.operation.set('+')\n if (self.operation.get() == '+'):\n self.operation.set('-')\n elif(self.operation.get() == '-'):\n 
self.operation.set('*')\n elif(self.operation.get() == '*'):\n self.operation.set('/')\n elif(self.operation.get() == '/'):\n self.operation.set('+')\n\n self.display_result()", "def write_arithmetic(self, command):\n \n note = '// ' + self._current_function_name + ': ' + command + '\\n'\n \n if command in ['add', 'sub', 'neg']:\n code = asm.math_cmd(asm.math_table[command])\n \n elif command in ['eq', 'gt', 'lt']:\n self._jump_count += 1\n jump = self._current_function_name + '$JUMP.' + str(self._jump_count)\n code = asm.compare_cmd(asm.math_table[command], jump) \n\n #if command is an and, or, not \n else:\n code = asm.logic_cmd(asm.math_table[command]) \n\n self._file_open.write(note + code + '\\n') \n return note + code + '\\n'", "def test_4_times_2_plus_1(self):\n parser = Parser()\n infix = [FloatToken(4.0), OpToken('*'), FloatToken(2.0), OpToken('+'),\n FloatToken(1.0)]\n expected = [Operand(value=4.0), Operand(value=2.0), MulOperator(),\n Operand(value=1.0), AddOperator()]\n output = parser.infix_to_postfix(infix)\n assert output == expected", "def test_4_plus_2_times_1(self):\n parser = Parser()\n infix = [FloatToken(4.0), OpToken('+'), FloatToken(2.0), OpToken('*'),\n FloatToken(1.0)]\n expected = [Operand(value=4.0), Operand(value=2.0), Operand(value=1.0),\n MulOperator(), AddOperator()]\n output = parser.infix_to_postfix(infix)\n assert output == expected", "def addEquations(self, node, makeEquations):\n nodeName = node.output[0]\n \n # Get the inputs\n inputName1, inputName2 = node.input\n shape1 = self.shapeMap[inputName1]\n shape2 = self.shapeMap[inputName2]\n self.shapeMap[nodeName] = shape1 \n \n # Decide which inputs are variables and which are constants\n firstInputConstant = False; secondInputConstant = False\n if inputName1 in self.constantMap:\n # Broadcast the constant input1 to the same shape as input2\n input1 = np.copy(self.constantMap[inputName1]) + np.zeros(shape2)\n firstInputConstant = True\n else:\n input1 = self.varMap[inputName1]\n \n if inputName2 in self.constantMap:\n # Broadcast the constant input2 to the same shape as input1\n input2 = np.copy(self.constantMap[inputName2]) + np.zeros(shape1)\n secondInputConstant = True\n else:\n input2 = self.varMap[inputName2]\n \n # The shape after broadcasting must match\n assert input1.shape == input2.shape\n self.shapeMap[nodeName] = shape1\n \n # If both inputs to add are constant, then the output is constant too\n # No new variables are needed, we just need to store the output in constantMap\n if firstInputConstant and secondInputConstant:\n self.constantMap[nodeName] = input1 + input2\n \n # If both inputs are variables, then we need a new variable to represent\n # the sum of the two variables\n elif makeEquations and not firstInputConstant and not secondInputConstant:\n outputVariables = self.makeNewVariables(nodeName)\n input1 = input1.reshape(-1)\n input2 = input2.reshape(-1)\n outputVariables = outputVariables.reshape(-1)\n for i in range(len(input1)):\n e = MarabouUtils.Equation()\n e.addAddend(1, input1[i])\n e.addAddend(1, input2[i])\n e.addAddend(-1, outputVariables[i])\n e.setScalar(0.0)\n self.addEquation(e)\n \n # Otherwise, we are adding constants to variables.\n # We don't need new equations or new variables if the input variable is the output of a linear equation.\n # Instead, we can just edit the scalar term of the existing linear equation.\n # However, if the input variables are not outputs of linear equations (input variables or outputs of \n # activation functions) then we will need new 
equations.\n elif makeEquations:\n if firstInputConstant:\n constInput = input1\n varInput = input2\n else:\n constInput = input2\n varInput = input1\n constInput = constInput.reshape(-1)\n varInput = varInput.reshape(-1)\n \n # Adjust equations to incorporate the constant addition\n numEquationsChanged = 0\n for equ in self.equList:\n (c,var) = equ.addendList[-1]\n assert c == -1\n if var in varInput:\n ind = np.where(var == varInput)[0][0]\n \n # Adjust the equation\n equ.setScalar(equ.scalar-constInput[ind])\n numEquationsChanged += 1\n \n # If we changed one equation for every input variable, then\n # we don't need any new equations\n if numEquationsChanged == len(varInput):\n self.varMap[nodeName] = varInput\n else:\n # Otherwise, assert no equations were changed, and we need to create new equations\n assert numEquationsChanged == 0\n outputVariables = self.makeNewVariables(nodeName).reshape(-1)\n for i in range(len(outputVariables)):\n e = MarabouUtils.Equation()\n e.addAddend(1, varInput[i])\n e.addAddend(-1, outputVariables[i])\n e.setScalar(-constInput[i])\n self.addEquation(e)", "def build_operators(self):\n A = self.frame\n m, n = A.shape\n Ax = lambda x: np.dot(A, x)\n Ax = LinearOperator( (m,n), Ax, matmat=Ax, dtype='d' )\n Aty = lambda y: np.dot(A.T, y)\n Aty = LinearOperator( (n,m), Aty, matmat=Aty, dtype='d' )\n return Ax, Aty", "def add_inplace_numeric_operator(self, operator, right=None):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['+=', '-=', '*=', '/=']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n try:\n l = self.inplace_numeric_operators[operator]\n except KeyError:\n l = []\n self.inplace_numeric_operators[operator] = l\n if right is None:\n right = self\n else:\n if isinstance(right, str):\n right = utils.param(right, 'right')\n try:\n right = utils.eval_param(right, None)\n except utils.SkipWrapper:\n return\n if right not in l:\n l.append((self, self, right))", "def test_advanced_math(self):\n exp = \"m{(10+10)+10+10}\"\n self.assertEqual(self.engine.Process(exp), \"40\", \"adds complex nested math\")", "def add_variables(equation, variables):\n for i, j in enumerate(range(2, 17, 4)):\n equation[j] = variables[i]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the brackets to the equation at the right spots.
def add_brackets(equation, brackets):
    for pos, brace in brackets.items():
        equation[pos] = brace
[ "def _solve_brackets(self, terms):\n while self._check_brackets(terms): # solve all terms inbetween brackets\n start = terms.index('(') # opening bracket\n end = self._find_closing_bracket(terms, start) # closing bracket related to start\n val = self.calc_term(terms[start+1:end]) # Value of term inbetween brackets\n # replace term in bracket by its value.\n new = terms[:start]\n new.append(val)\n new.extend(terms[end+1:])\n terms = new\n return terms", "def _bracket(self, expr, parent_precedence):\n expr_prec = precedence(expr)\n parent_prec = parent_precedence\n # Some equations are substituted for expr. of lower precedence\n # For example x**-1 is printed as 1/x. An example where this would give na issue is 2**cos(x)**-1\n # Which should print as 2**(1 / math.cos(x))\n # Adjust precedence to put brackets around 1/x if necessary\n if isinstance(expr, sympy.Pow) and expr.is_commutative and \\\n (-expr.exp is sympy.S.Half or -expr.exp is sympy.S.One):\n expr_prec -= 1\n\n if expr_prec < parent_prec:\n return '(' + self._print(expr) + ')'\n return self._print(expr)", "def add_operations(equation, operations):\n for i, j in enumerate(range(3, 17, 5)):\n equation[j] = operations[i]", "def bracket(self, x, y):\n return x*y - y*x", "def wrap_in_brackets(string, brackets='[]', space=0):\n\n # Cut leading/trailing brackets\n while string.startswith(brackets[0]):\n string = string[1:]\n while string.endswith(brackets[1]):\n string = string[:-1]\n\n return f\"[{' '*space}{string}{' '*space}]\"", "def hop_brackets(code_edit):\n textCursor = code_edit.textCursor()\n pos = textCursor.position()\n whole_text = code_edit.toPlainText()\n\n first_half = whole_text[:pos]\n second_half = whole_text[pos:]\n first_pos = first_half.rfind('(')\n second_pos = second_half.find(')')\n\n first_pos = first_pos + 1\n second_pos = second_pos + pos\n\n new_pos = first_pos if whole_text[pos] == ')' else second_pos\n textCursor.setPosition(new_pos, QtGui.QTextCursor.MoveAnchor)\n code_edit.setTextCursor(textCursor)", "def infix(self):\n # Task 1.1\n if is_constant(self.root) or is_variable(self.root):\n return self.root\n\n if is_unary(self.root):\n return self.root + self.first.infix()\n\n if is_binary(self.root):\n return \"(\" + self.first.infix() + self.root + self.second.infix() + \")\"\n\n else:\n raise Exception(\"Invalid Formula\")", "def brackets_to_txt(L):\n # if the base ring is QQbar, display coefficients as radicals\n disp = QQbar.options('display_format')\n QQbar.options(display_format=\"radical\")\n\n bracketstr = \"\"\n for X, Y in combinations(L.basis(), 2):\n Z = X.bracket(Y)\n if Z:\n bracketstr += \" [%s, %s] = %s\\n\" % (X, Y, Z)\n QQbar.options(display_format=disp)\n return bracketstr", "def test_2_times_paren_4_plus_3(self):\n parser = Parser()\n infix = [FloatToken(2.0), OpToken('*'), ParenToken('('),\n FloatToken(4.0), OpToken('+'), FloatToken(3.0),\n ParenToken(')')]\n expected = [Operand(value=2.0), Operand(value=4.0), Operand(value=3.0),\n AddOperator(), MulOperator()]\n output = parser.infix_to_postfix(infix)\n assert output == expected", "def _createWellFormedExpression(self, expression):\n while (expression.find(\" \") != -1):\n expression = expression.replace(\" \", \"\")\n\n expression = expression.replace(\"\\n\", \" \")\n expression = expression.replace(\"\\t\", \" \")\n\n expression = expression.replace(\"+\", \" + \")\n expression = expression.replace(\"*\", \" * \")\n\n expression = expression.replace(\"(\", \" ( \")\n expression = expression.replace(\")\", \" ) \")\n\n expression = 
expression.replace(\"[\", \" [ \")\n expression = expression.replace(\"]\", \" ] \")\n \n while (expression.find(\" \") != -1):\n expression = expression.replace(\" \", \" \")\n \n return expression", "def square_brackets_expand(expr):\n k = 0\n tokens = []\n result = []\n\n while k < len(expr):\n tokens.append(expr[k])\n if len(tokens) == 3 and tokens[1] == '-':\n # This is a range like a-z.\n start, end = tokens[0], tokens[2]\n for i in range(ord(start), ord(end) + 1):\n result.append(chr(i))\n tokens = []\n elif len(tokens) == 3:\n # No dash in the middle. We can safely expand the first character.\n result.append(tokens[0])\n tokens = tokens[1:]\n k += 1\n else:\n if tokens:\n result.extend(tokens)\n return result", "def remove_terms_in_bracket(text,bracket_form=\"curly\"):\n CURLY_BRACKET_REGEX = re.compile(r\"\\{(.*?)\\}\") \n SQUARE_BRACKET_REGEX = re.compile(r\"\\[(.*?)\\]\") \n NORMAL_BRACKET_REGEX = re.compile(r\"\\((.*?)\\)\") \n if bracket_form == \"curly\" or bracket_form == \"{}\":\n result = re.sub(CURLY_BRACKET_REGEX,\"\",text)\n elif bracket_form == \"square\" or bracket_form == \"[]\":\n result = re.sub(SQUARE_BRACKET_REGEX,\"\",text)\n elif bracket_form == \"normal\" or bracket_form == \"()\":\n result = re.sub(NORMAL_BRACKET_REGEX,\"\",text)\n return result", "def render(expr, lhs=\"\"):\n left = \"$\"\n if lhs:\n left = \"$%s =\" % lhs\n return ''.join([left, sympy.latex(expr), \"$\"])", "def replace_(expression):\n original = ['x', '÷', '^', 'π', 'e', 'sin⁻¹(', 'cos⁻¹(', 'tan⁻¹(', '!', \"√\"]\n replaced = ['*', '/', '**', str(math.pi), str(math.e), 'asin(', 'acos(', 'atan(', 'factorial(', \"square_root(\"]\n for original_, replaced_ in zip(original, replaced):\n new_text = expression.replace(original_, replaced_)\n expression = new_text\n \n # Adding required parenthesis\n if expression.count('(') > expression.count(')'):\n expression = expression + ')'\n \n # Removing Redundant parenthesis\n while expression.count('(') < expression.count(')'):\n expl = list(expression)\n expl.remove(')')\n expression = ''.join(expl)\n return expression", "def brackets_to_align(L, amp=\"&amp;\"):\n rows = []\n for X, Y in combinations(L.basis(), 2):\n Z = X.bracket(Y)\n if Z:\n row = \"[%s, %s] %s = %s\" % (latex(X), latex(Y), amp, latex(Z))\n rows.append(row)\n if not rows:\n return \"\"\n latexstr = \"\\\\begin{align*}\\n\"\n latexstr += \"\\\\\\\\\\n\".join(rows)\n latexstr += \"\\\\end{align*}\"\n QQbar.options(display_format=disp)\n return latexstr", "def brackets_to_table(L):\n # if the base ring is QQbar, display coefficients as radicals\n disp = QQbar.options('display_format')\n QQbar.options(display_format=\"radical\")\n\n rows = []\n for X, Y in combinations(L.basis(), 2):\n Z = X.bracket(Y)\n if Z:\n rows.append((X, Y, Z))\n if not rows:\n return \"\"\n\n htmlstr = '<table class=\"brackets\">\\n'\n for (X, Y, Z) in rows:\n htmlstr += '<tr>'\n htmlstr += '<td class=\"brkt\">[%s, %s]</td>' % (X, Y)\n htmlstr += '<td class=\"eq\">=</td>'\n htmlstr += '<td class=\"res\">%s</td>' % Z\n htmlstr += '</tr>\\n'\n htmlstr += '</table>'\n return htmlstr", "def generate_expression(p_array_nbs, p_list_operations):\n\n ret_expression = ''\n\n for i in range(0, len(p_array_nbs) - 1):\n ret_expression += p_array_nbs[i] + ' ' + p_list_operations[i] + ' '\n\n ret_expression += p_array_nbs[-1]\n\n return ret_expression", "def test_4_times_2_plus_1(self):\n parser = Parser()\n infix = [FloatToken(4.0), OpToken('*'), FloatToken(2.0), OpToken('+'),\n FloatToken(1.0)]\n expected = 
[Operand(value=4.0), Operand(value=2.0), MulOperator(),\n Operand(value=1.0), AddOperator()]\n output = parser.infix_to_postfix(infix)\n assert output == expected", "def test_4_plus_2_times_1(self):\n parser = Parser()\n infix = [FloatToken(4.0), OpToken('+'), FloatToken(2.0), OpToken('*'),\n FloatToken(1.0)]\n expected = [Operand(value=4.0), Operand(value=2.0), Operand(value=1.0),\n MulOperator(), AddOperator()]\n output = parser.infix_to_postfix(infix)\n assert output == expected" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return chain length of sequence starting on one.
def chain_length(seq):
    compare = 1
    for i in range(len(seq)):
        if not compare == seq[i]:
            return compare
        else:
            compare += 1
    return compare
[ "def calc_chain_len(num):\n\n # Check if the length has been calculated before\n length = chain_len.get(num)\n if length:\n return length\n\n # If not, calculate it recursively\n length = calc_chain_len(next_number(num)) + 1\n\n # Keep track of the chain length\n chain_len[num] = length\n\n return length", "def chainLength(self, start_number):\n \n number = start_number\n number_of_steps = 1\n while number != 1:\n if number in self.cache:\n new_steps = number_of_steps + self.cache[number]\n self.cache[start_number] = new_steps\n return\n elif number % 2 == 0:\n number /= 2\n else:\n number = 3 * number + 1\n number_of_steps += 1\n self.cache[start_number] = number_of_steps\n return", "def _seq_len(seq):\n i = 0\n for item in seq:\n i += 1\n return i", "def receiving_chain_length(self):\n\n return None if self.__receiving_chain == None else self.__receiving_chain.length", "def length(list):\n if list == []:\n return 0\n elif list[0:] == list[0]:\n return 1\n else:\n return length(list[1:]) + 1 # calls list from second value to the end to cycle through", "def get_first_length(self):\n if self.first_set is None:\n return 0\n\n return len(self.first_set)", "def solution():\n c = Chain()\n for x in xrange(2,1000000):\n if x not in c.cache:\n length = c.calculate_length(x)\n c.update_longest_chain(x, length)\n return c.start_number_with_longest_chain", "def sending_chain_length(self):\n\n return None if self.__sending_chain == None else self.__sending_chain.length", "def length(self):\n counter = 0\n current_node = self.head\n while current_node != None:\n counter += 1\n current_node = current_node.next\n return counter", "def count(seq):\n return sum(1 for x in seq)", "def seqlenProtein(self):\n return (len(self.sequence))", "def get_LL_length(self):\n current = self.head\n length = 0 \n while current:\n current = current.next\n length += 1 \n return length", "def length(branch):\n return branch[0]", "def get_sequence_size(seqs):\n return [len(seqs), sum([len(seqs[seq]) for seq in seqs]) // len(seqs)]", "def base_count(self):\n\n\t\tcount = len(self.dna_seq)\n\n\t\treturn(count)", "def length(self, sequence):\n\n\t\tused = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))\n\t\tlength = tf.reduce_sum(used, reduction_indices=1)\n\t\tlength = tf.cast(length, tf.int32)\n\t\treturn length", "def sequence_lengths(self):\n return self._sequence_lengths", "def num_chains(rack):\n if live_object_is_valid(rack) and isinstance(rack, Live.RackDevice.RackDevice):\n return len(rack.chains) + len(rack.return_chains)\n return 0", "def count(seq): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit the best fitting plane p u + q v = w
def fit_uvwplane_only(vis: Visibility) -> (float, float):
    su2 = numpy.sum(vis.u * vis.u)
    sv2 = numpy.sum(vis.v * vis.v)
    suv = numpy.sum(vis.u * vis.v)
    suw = numpy.sum(vis.u * vis.w)
    svw = numpy.sum(vis.v * vis.w)
    det = su2 * sv2 - suv ** 2
    p = (sv2 * suw - suv * svw) / det
    q = (su2 * svw - suv * suw) / det
    return p, q
[ "def fit_plane(x, y, z):\n pts = np.isfinite(z)\n if len(z.shape) > 1:\n x, y = np.meshgrid(x, y)\n xx, yy = x[pts].flatten(), y[pts].flatten()\n else:\n xx, yy = x, y\n\n flat = np.ones(xx.shape)\n\n coefs = np.linalg.lstsq(np.stack([xx, yy, flat]).T, z[pts].flatten(), rcond=None)[0]\n plane_fit = coefs[0] * x + coefs[1] * y + coefs[2]\n return plane_fit", "def fit_uvwplane(vis: Visibility, remove=True) -> (Image, float, float):\n nvis = len(vis.data)\n before = numpy.max(numpy.std(vis.w))\n p, q = fit_uvwplane_only(vis)\n residual = vis.data['uvw'][:, 2] - (p * vis.u + q * vis.v)\n after = numpy.max(numpy.std(residual))\n log.debug('fit_uvwplane: Fit to %d rows reduces rms w from %.1f to %.1f m'\n % (nvis, before, after))\n if remove:\n vis.data['uvw'][:, 2] -= p * vis.u + q * vis.v\n return vis, p, q", "def fit(self, star):\n star1 = self.chisq(star) # Get chisq Taylor expansion for linearized model\n ### Check for non-pos-def\n ###S = np.linalg.svd(star1.fit.alpha,compute_uv=False)\n ###print(\" .in fit(), min SV:\",np.min(S))###\n ###U,S,Vt = np.linalg.svd(star1.fit.alpha,compute_uv=True)\n ###print(\" ..in fit(), min SV:\",np.min(S))###\n\n # star1 has marginalized over flux (& center, if free), and updated these\n # for best linearized fit at the input parameter values.\n if self._degenerate:\n # Do SVD and retain\n # input values for degenerate parameter combinations\n # U,S,Vt = np.linalg.svd(star1.fit.alpha)\n S,U = np.linalg.eigh(star1.fit.alpha)\n # Invert, while zeroing small elements of S.\n # \"Small\" will be taken to be causing a small chisq change\n # when corresponding PSF component changes by the full flux of PSF\n small = 0.2 * self.pixel_area * self.pixel_area\n if np.any(S < -small):\n print(\"negative: \",np.min(S),\"small:\",small)###\n raise ValueError(\"Negative singular value in alpha matrix\")\n # Leave values that are close to zero equal to zero in inverse.\n nonzero = np.abs(S) > small\n invs = np.zeros_like(S)\n invs[nonzero] = 1./S[nonzero]\n\n ###print('S/zero:',S.shape,np.count_nonzero(np.abs(S)<=small),'small=',small) ###\n ###print(' ',np.max(S[np.abs(S)<=small]),np.min(S[np.abs(S)>small])) ##\n # answer = V * S^{-1} * U^T * beta\n # dparam = np.dot(Vt.T, invs * np.dot(U.T,star1.fit.beta))\n dparam = np.dot(U, invs * np.dot(U.T,star1.fit.beta))\n else:\n # If it is known there are no degeneracies, we can skip SVD\n dparam = np.linalg.solve(star1.fit.alpha, star1.fit.beta)\n # ??? dparam = scipy.linalg.solve(alpha, beta, sym_pos=True) would be faster\n # Create new StarFit, update the chisq value. 
Note no beta is returned as\n # the quadratic Taylor expansion was about the old parameters, not these.\n starfit2 = StarFit(star1.fit.params + dparam,\n flux = star1.fit.flux,\n center = star1.fit.center,\n alpha = star1.fit.alpha, # Inverse covariance matrix\n chisq = star1.fit.chisq \\\n + np.dot(dparam, np.dot(star1.fit.alpha, dparam)) \\\n - 2 * np.dot(star1.fit.beta, dparam))\n return Star(star1.data, starfit2)", "def compute_fit(self):\n self.z = np.polyfit(self.a, self.e, 2) # Getting the fit parameters\n self.f = np.poly1d(self.z) ## Getting the new function\n self.x_fit = np.linspace(self.a[0], self.a[-1], 100)\n self.y_fit = self.f(self.x_fit)\n\n # Similarly for the volume\n self.vz = np.polyfit(self.v, self.e, 2) # Getting the fit parameters\n self.vf = np.poly1d(self.vz) ## Getting the new function\n self.v_x_fit = np.linspace(self.v[0], self.v[-1], 100)\n self.v_y_fit = self.vf(self.v_x_fit)\n\n # Getting the minimum energy\n self.E_optimized = min(self.y_fit)\n self.E_optimized_printable = self.E_optimized.astype(np.float)\n\n # Getting the optimized lattice constant\n self.min_index = np.argmin(self.y_fit)\n self.a0_optimized = self.x_fit.flat[self.min_index]\n self.v0_optimized = self.v_x_fit.flat[self.min_index] # There are four primitive cells in a single conventional cell\n\n # Calculations\n # Getting the double derivative using a 2nd degree polynomial\n self.dda0 = 2*self.z[0]#.flat[0]\n self.ddv0 = 2*self.vz[0]#.flat[0]\n self.B = eVA32GPa(self.v0_optimized*self.ddv0) # 1 eV/Angstrom3 = 160.21766208 GPa", "def _fit(self, X, y, w):\n pass", "def fit(self, z, w=None):\n return super().fit(z, w=w)", "def fit_plane_directly(img=None, xyz=None):\n if xyz is None:\n if img is None:\n raise RuntimeError(\"Need at least one valid input!\")\n xyz = make_data_points_from_image(img) # (3, N) shape: (X, Y, Z)\n # Filter out coordinates with NaN z-values\n zmask = np.isfinite(xyz[2])\n xyz = xyz[:, zmask] # now the input xyz array is preserved\n # Switch to (N, 3), the input format for linalg.lstsq\n xyz = xyz.T\n # Put things in terms of z and A; solving for C in z = A*C\n z = xyz[:, 2].copy()\n A = xyz\n A[:, 2] = 1\n \"\"\"\n All set up for z = A*C, where C is our (3,1) array of answers!\n C should be [[a], [b], [d]], where a gives the x gradient, b the y\n gradient, and d the offset (z-value) at the origin\n z = a*x + b*y + d is the expression for z, rearranged from the more general\n a*x + b*y + c*z + d = 0 when c = -1\n See https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lstsq.html\n for lstsq documentation (explanations of res, rnk, s, which I won't use)\n \"\"\"\n p, res, rnk, s = lstsq(A, z)\n print(p.shape)\n return p", "def _weibull_fit(self):\n if not self._is_random:\n curr_avg = np.mean(self._relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg)))\n curr_model = Model(weibull_model)\n curr_model.set_param_hint('c', value = 1, min = 0, max = np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k = x_val,\n params=pars,method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y = result.eval(params=result.params, k=pred_x)\n else:\n for i in range(0, self._shuffle_time):\n curr_idx = np.random.choice(self._rnum, self._csys, replace=False)\n curr_relmat = self._relmat[:, curr_idx]\n curr_avg = np.mean(curr_relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg))) + 1\n curr_model 
= Model(weibull_model)\n curr_model.set_param_hint('c', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k=x_val,\n params=pars, method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y += result.eval(params=result.params, k=pred_x)\n self._pred_y /= self._shuffle_time", "def fit(self) -> None:\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `sigma_ols`.\n self.sigma_ols = self._compute_sigma_ols()\n\n mu_prior = np.zeros((self.m, self.N))\n for i in range(self.m):\n mu_prior[i, self.p * i] = 1\n mu_prior = mu_prior.flatten()\n\n v_prior = self._construct_v_prior()\n\n Z_sig_Z_sum = 0\n Z_sig_y_sum = 0\n\n for t in range(self.p, self.T):\n Z_t = self._construct_Zt(\n self.X, self.Y, t\n ) # shape: m x [m * (m * p + r + 1)]\n\n z_sum_term = (\n Z_t.T @ inv(self.sigma_ols)\n ) @ Z_t # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n y_sum_term = (Z_t.T @ inv(self.sigma_ols)) @ self.Y[\n :, t\n ] # shape: [m * (m * p + r + 1)] x 1\n\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == z_sum_term.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {z_sum_term.shape}\"\n assert (\n self.num_mu_coefficients,\n ) == y_sum_term.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {y_sum_term.shape}\"\n )\n\n Z_sig_Z_sum += z_sum_term\n Z_sig_y_sum += y_sum_term\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `v_posterior`.\n self.v_posterior = inv(\n inv(v_prior) + Z_sig_Z_sum\n ) # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == self.v_posterior.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {self.v_posterior.shape}\"\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `mu_posterior`.\n self.mu_posterior = self.v_posterior @ (\n inv(v_prior) @ mu_prior + Z_sig_y_sum\n ) # shape: [m * (m * p + r + 1)] x 1\n assert (\n self.num_mu_coefficients,\n ) == self.mu_posterior.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {self.mu_posterior.shape}\"\n )\n # pyre-fixme[16]: `BayesianVAR` has no attribute `resid`.\n self.resid = self._get_training_residuals()\n self.fitted = True", "def train_model_qp(data, model):\n P, q, G, h = qp_helper(data, model)\n P = cvxopt.matrix(P, P.shape, 'd')\n q = cvxopt.matrix(q, q.shape, 'd')\n G = cvxopt.matrix(G, G.shape, 'd')\n h = cvxopt.matrix(h, h.shape, 'd')\n sol = cvxopt.solvers.qp(P, q, G, h)\n z = np.array(sol['x'])\n # Implementation here (do not modify the code above)\n pass\n # Set model.w\n feature_size = model.ndims + 1\n model.w = z[0:feature_size]", "def plane_fit(image_array, guess = [0,0,0]):\n x,y = np.indices(image_array.shape) #grid indices\n xy = np.array([x.ravel(), y.ravel()])\n f = lambda xy, (a, b, c): a*xy[0] + b*xy[1] +c\n A = leastsq(lambda params: image_array.ravel() - f(xy, params), guess)\n imfit = f(xy, A[0])\n return image_array - imfit.reshape(image_array.shape)", "def get_best_fit_plane(pts):\n A = np.c_[pts[:, 0], pts[:, 1], np.ones(pts.shape[0])]\n (a, b, c), _, _, _ = scipy.linalg.lstsq(A, pts[:, 2]) # coefficients\n\n errors = np.array([])\n\n direction = np.array([a, b, -1])\n normal = direction / np.linalg.norm(direction)\n\n projections = np.array([])\n\n for pt in pts:\n dist = np.dot(normal, pt - np.array([0, 0, c]))\n projection = pt - dist * 
normal\n projections = np.append(projections, projection)\n projections = projections.reshape(-1, 3)\n errors = np.append(errors, dist)\n # If this value is close to 0, then the distances are accurate\n # print(A * projection[0] + B * projection[1] + c - projection[2])\n\n return (a, b, c), np.sqrt(sum([error ** 2 for error in errors]) /\n len(errors))", "def surfpt(self, u=-1, v=-1, **kwargs):\n check_vars = kwargs.get('check_vars', True)\n\n if check_vars:\n # Check all parameters are set before the surface evaluation\n self._check_variables()\n # Check if u and v parameters are correct\n utils.check_uv(u, v)\n\n # Algorithm A4.3\n span_v = utils.find_span(self._degree_v, tuple(self._knot_vector_v), self._control_points_size_v, v)\n basis_v = utils.basis_functions(self._degree_v, tuple(self._knot_vector_v), span_v, v)\n span_u = utils.find_span(self._degree_u, tuple(self._knot_vector_u), self._control_points_size_u, u)\n basis_u = utils.basis_functions(self._degree_u, tuple(self._knot_vector_u), span_u, u)\n idx_u = span_u - self._degree_u\n sptw = [0.0 for _ in range(self._dimension)]\n\n for l in range(0, self._degree_v + 1):\n temp = [0.0 for _ in range(self._dimension)]\n idx_v = span_v - self._degree_v + l\n for k in range(0, self._degree_u + 1):\n temp[:] = [tmp + (basis_u[k] * cp) for tmp, cp in zip(temp, self._control_points2D[idx_u + k][idx_v])]\n sptw[:] = [ptw + (basis_v[l] * tmp) for ptw, tmp in zip(sptw, temp)]\n\n # Divide by weight\n spt = [float(c / sptw[-1]) for c in sptw[0:(self._dimension - 1)]]\n\n return spt", "def _fit_model(self):\n # Determine location parameter from data\n floc = self._determine_loc()\n\n # Fit Weibull to data\n c, loc, scale = self.model.fit(self.ratio, self.c_guess, floc=floc)\n\n # Make Weibull-fitted cdf ratio\n self.fitted_ratio = self.model.pdf(self.bins, c, loc, scale)\n \n self.fitted_pars = {'c': c, 'loc': loc, 'scale': scale}\n self.pars = self.fitted_pars", "def fit(self, X):", "def test_homoscedastic_least_squares_roptimal_design(self):\n poly_degree = 1;\n num_design_pts = 2\n design_samples = np.linspace(-1,1,num_design_pts)\n noise_multiplier = None\n design_factors = univariate_monomial_basis_matrix(\n poly_degree,design_samples)\n num_pred_pts = 3\n pred_samples = np.random.uniform(-1,1,num_pred_pts)\n pred_factors=univariate_monomial_basis_matrix(poly_degree,pred_samples)\n\n opts = {'beta':0,'pred_factors':pred_factors,\n 'pred_samples':pred_samples[np.newaxis,:],'nonsmooth':False}\n \n opt_problem = AlphabetOptimalDesign('R',design_factors,opts=opts)\n solver_opts = {'disp':True,'iprint': 0, 'ftol':1e-12,'maxiter':2000}\n #solver_opts = {'solver':'ipopt','print_level':0,\n # 'tol':1e-8,'acceptable_obj_change_tol':1e-8,\n # 'derivative_test':'first-order','maxiter':1000}\n #solver_opts.update({'constraint_jacobianstructure':partial(get_r_oed_jacobian_structure,num_pred_pts,num_design_pts)})\n mu_R ,res= opt_problem.solve(solver_opts,return_full=True)\n homog_outer_prods = compute_homoscedastic_outer_products(design_factors)\n variance = compute_prediction_variance(\n mu_R,pred_factors,homog_outer_prods)\n assert (res.x[0]<=variance.min())\n\n\n del opts['beta']\n if 'constraint_jacobianstructure' in solver_opts:\n del solver_opts['constraint_jacobianstructure']\n opt_problem = AlphabetOptimalDesign('I',design_factors,opts=opts)\n mu_I = opt_problem.solve(solver_opts)\n variance = compute_prediction_variance(\n mu_I,pred_factors,homog_outer_prods)\n assert np.allclose(mu_R,mu_I)", "def fit(cls, vertices):\n orientation = 
np.identity(3)\n translation = np.zeros((3, 1))\n scale = np.zeros(3)\n\n # The scale would remain invariant under rotation and translation.\n # We can safely estimate the scale from the oriented box.\n for axis in range(3):\n for edge_id in range(4):\n # The edges are stored in quadruples according to each axis\n begin, end = EDGES[axis * 4 + edge_id]\n scale[axis] += np.linalg.norm(vertices[begin, :] - vertices[end, :])\n scale[axis] /= 4.\n\n x = cls.scaled_axis_aligned_vertices(scale)\n system = np.concatenate((x, np.ones((NUM_KEYPOINTS, 1))), axis=1)\n solution, _, _, _ = lstsq(system, vertices, rcond=None)\n orientation = solution[:3, :3].T\n translation = solution[3, :3]\n \n return orientation, translation, scale", "def fit_on_trace(self):\r\n fit = CurveFit\\\r\n (self.weight_trace_data, self.Vp*1000/10, camera_fps = self.samplingrate_cam, DAQ_sampling_rate = self.samplingrate_display_curve, \\\r\n main_directory = self.main_directory, rhodopsin = self.Construct_name.text()) \r\n fit.Photobleach()\r\n fit.IsolatePeriods()\r\n fit.TransformCurves()\r\n fit.CurveAveraging()\r\n fit.fit_on_averaged_curve()\r\n fit.ExponentialFitting()\r\n fit.extract_sensitivity()\r\n fit.Statistics()", "def localpoly(self, x):\n n = self.X.shape[0]\n K_i = 1 / self.h * self.kernel(x, self.X, self.h)\n f_i = 1 / n * sum(K_i)\n\n if f_i == 0: # doesnt really happen, but in order to avoid possible errors\n W_hi = np.zeros(n)\n else:\n W_hi = K_i / f_i\n\n X1 = np.ones(n)\n X2 = self.X - x\n X3 = X2 ** 2\n\n X = np.array([X1, X2, X3]).T\n W = np.diag(W_hi) # (n,n)\n\n XTW = (X.T).dot(W) # (3,n)\n XTWX = XTW.dot(X) # (3,3)\n XTWy = XTW.dot(self.y) # (3,1)\n\n beta = np.linalg.pinv(XTWX).dot(XTWy) # (3,1)\n return {\"fit\": beta[0], \"first\": beta[1], \"second\": beta[2], \"weight\": W_hi}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit and optionally remove the best fitting plane p u + q v = w
def fit_uvwplane(vis: Visibility, remove=True) -> (Image, float, float):
    nvis = len(vis.data)
    before = numpy.max(numpy.std(vis.w))
    p, q = fit_uvwplane_only(vis)
    residual = vis.data['uvw'][:, 2] - (p * vis.u + q * vis.v)
    after = numpy.max(numpy.std(residual))
    log.debug('fit_uvwplane: Fit to %d rows reduces rms w from %.1f to %.1f m'
              % (nvis, before, after))
    if remove:
        vis.data['uvw'][:, 2] -= p * vis.u + q * vis.v
    return vis, p, q
[ "def fit_uvwplane_only(vis: Visibility) -> (float, float):\n \n su2 = numpy.sum(vis.u * vis.u)\n sv2 = numpy.sum(vis.v * vis.v)\n suv = numpy.sum(vis.u * vis.v)\n suw = numpy.sum(vis.u * vis.w)\n svw = numpy.sum(vis.v * vis.w)\n det = su2 * sv2 - suv ** 2\n p = (sv2 * suw - suv * svw) / det\n q = (su2 * svw - suv * suw) / det\n return p, q", "def remove_polyfit(arr, **kwargs):\n \n return arr - hbt.polyfit(arr, **kwargs)", "def fit_plane(x, y, z):\n pts = np.isfinite(z)\n if len(z.shape) > 1:\n x, y = np.meshgrid(x, y)\n xx, yy = x[pts].flatten(), y[pts].flatten()\n else:\n xx, yy = x, y\n\n flat = np.ones(xx.shape)\n\n coefs = np.linalg.lstsq(np.stack([xx, yy, flat]).T, z[pts].flatten(), rcond=None)[0]\n plane_fit = coefs[0] * x + coefs[1] * y + coefs[2]\n return plane_fit", "def fit(self, z, w=None):\n return super().fit(z, w=w)", "def _fit(self, X, y, w):\n pass", "def fit_on_trace(self):\r\n fit = CurveFit\\\r\n (self.weight_trace_data, self.Vp*1000/10, camera_fps = self.samplingrate_cam, DAQ_sampling_rate = self.samplingrate_display_curve, \\\r\n main_directory = self.main_directory, rhodopsin = self.Construct_name.text()) \r\n fit.Photobleach()\r\n fit.IsolatePeriods()\r\n fit.TransformCurves()\r\n fit.CurveAveraging()\r\n fit.fit_on_averaged_curve()\r\n fit.ExponentialFitting()\r\n fit.extract_sensitivity()\r\n fit.Statistics()", "def fit(self, star):\n star1 = self.chisq(star) # Get chisq Taylor expansion for linearized model\n ### Check for non-pos-def\n ###S = np.linalg.svd(star1.fit.alpha,compute_uv=False)\n ###print(\" .in fit(), min SV:\",np.min(S))###\n ###U,S,Vt = np.linalg.svd(star1.fit.alpha,compute_uv=True)\n ###print(\" ..in fit(), min SV:\",np.min(S))###\n\n # star1 has marginalized over flux (& center, if free), and updated these\n # for best linearized fit at the input parameter values.\n if self._degenerate:\n # Do SVD and retain\n # input values for degenerate parameter combinations\n # U,S,Vt = np.linalg.svd(star1.fit.alpha)\n S,U = np.linalg.eigh(star1.fit.alpha)\n # Invert, while zeroing small elements of S.\n # \"Small\" will be taken to be causing a small chisq change\n # when corresponding PSF component changes by the full flux of PSF\n small = 0.2 * self.pixel_area * self.pixel_area\n if np.any(S < -small):\n print(\"negative: \",np.min(S),\"small:\",small)###\n raise ValueError(\"Negative singular value in alpha matrix\")\n # Leave values that are close to zero equal to zero in inverse.\n nonzero = np.abs(S) > small\n invs = np.zeros_like(S)\n invs[nonzero] = 1./S[nonzero]\n\n ###print('S/zero:',S.shape,np.count_nonzero(np.abs(S)<=small),'small=',small) ###\n ###print(' ',np.max(S[np.abs(S)<=small]),np.min(S[np.abs(S)>small])) ##\n # answer = V * S^{-1} * U^T * beta\n # dparam = np.dot(Vt.T, invs * np.dot(U.T,star1.fit.beta))\n dparam = np.dot(U, invs * np.dot(U.T,star1.fit.beta))\n else:\n # If it is known there are no degeneracies, we can skip SVD\n dparam = np.linalg.solve(star1.fit.alpha, star1.fit.beta)\n # ??? dparam = scipy.linalg.solve(alpha, beta, sym_pos=True) would be faster\n # Create new StarFit, update the chisq value. 
Note no beta is returned as\n # the quadratic Taylor expansion was about the old parameters, not these.\n starfit2 = StarFit(star1.fit.params + dparam,\n flux = star1.fit.flux,\n center = star1.fit.center,\n alpha = star1.fit.alpha, # Inverse covariance matrix\n chisq = star1.fit.chisq \\\n + np.dot(dparam, np.dot(star1.fit.alpha, dparam)) \\\n - 2 * np.dot(star1.fit.beta, dparam))\n return Star(star1.data, starfit2)", "def _weibull_fit(self):\n if not self._is_random:\n curr_avg = np.mean(self._relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg)))\n curr_model = Model(weibull_model)\n curr_model.set_param_hint('c', value = 1, min = 0, max = np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k = x_val,\n params=pars,method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y = result.eval(params=result.params, k=pred_x)\n else:\n for i in range(0, self._shuffle_time):\n curr_idx = np.random.choice(self._rnum, self._csys, replace=False)\n curr_relmat = self._relmat[:, curr_idx]\n curr_avg = np.mean(curr_relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg))) + 1\n curr_model = Model(weibull_model)\n curr_model.set_param_hint('c', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k=x_val,\n params=pars, method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y += result.eval(params=result.params, k=pred_x)\n self._pred_y /= self._shuffle_time", "def fit_if_needed(self):\n if not self._fitted:\n self.fit()", "def fit_image(self):\n self.params = self.all_params['Fit 0']\n self.fit_results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n #report_fit(self.fit_results)\n sel.fparams = self.fit_results.params", "def test_homoscedastic_least_squares_roptimal_design(self):\n poly_degree = 1;\n num_design_pts = 2\n design_samples = np.linspace(-1,1,num_design_pts)\n noise_multiplier = None\n design_factors = univariate_monomial_basis_matrix(\n poly_degree,design_samples)\n num_pred_pts = 3\n pred_samples = np.random.uniform(-1,1,num_pred_pts)\n pred_factors=univariate_monomial_basis_matrix(poly_degree,pred_samples)\n\n opts = {'beta':0,'pred_factors':pred_factors,\n 'pred_samples':pred_samples[np.newaxis,:],'nonsmooth':False}\n \n opt_problem = AlphabetOptimalDesign('R',design_factors,opts=opts)\n solver_opts = {'disp':True,'iprint': 0, 'ftol':1e-12,'maxiter':2000}\n #solver_opts = {'solver':'ipopt','print_level':0,\n # 'tol':1e-8,'acceptable_obj_change_tol':1e-8,\n # 'derivative_test':'first-order','maxiter':1000}\n #solver_opts.update({'constraint_jacobianstructure':partial(get_r_oed_jacobian_structure,num_pred_pts,num_design_pts)})\n mu_R ,res= opt_problem.solve(solver_opts,return_full=True)\n homog_outer_prods = compute_homoscedastic_outer_products(design_factors)\n variance = compute_prediction_variance(\n mu_R,pred_factors,homog_outer_prods)\n assert (res.x[0]<=variance.min())\n\n\n del opts['beta']\n if 'constraint_jacobianstructure' in solver_opts:\n del solver_opts['constraint_jacobianstructure']\n opt_problem = AlphabetOptimalDesign('I',design_factors,opts=opts)\n mu_I = opt_problem.solve(solver_opts)\n variance = compute_prediction_variance(\n mu_I,pred_factors,homog_outer_prods)\n assert 
np.allclose(mu_R,mu_I)", "def unfreeze_adaptive_regularizer_param(self):\n if self.local_weights_hook is not None and self.local_weights_hook_flag:\n print(\"the local adaptive smoother weight is unlocked\")\n self.local_weights_hook.remove()\n self.local_weights_hook_flag = False", "def modelFit(fit, spectrum, photometry, photDict, specDict, filtDict, d='', sig_d='', exclude=[], plot=False, Rlim=(0,100), Tlim=(700,3000), title='', weighting=True, verbose=False, save=''):\n for b in photometry.keys():\n if 'unc' not in b:\n if not photometry[b] or not photometry[b+'_unc']: photometry.pop(b), photometry.pop(b+'_unc')\n \n fit_list, unfit_list, phot_fit = [], [], False\n for k in specDict:\n try:\n w, f = specDict[k]['wavelength'], specDict[k]['flux'] \n good, const = goodness(photometry if phot_fit else spectrum, model_dict[k] if phot_fit else rebin_spec([specDict[k]['wavelength'],specDict[k]['flux']], spectrum[0], waveunits='um'), filt_dict=filtDict, exclude=exclude, weighting=weighting)\n R, sig_R = (d*np.sqrt(float(const))/ac.R_jup).decompose().value, (sig_d*np.sqrt(float(const))/ac.R_jup).decompose().value if sig_d else ''\n if R>Rlim[0] and R<Rlim[1]: fit_list.append([abs(good), k, float(const), R, sig_R, None, [w,f*const]])\n else: unfit_list.append([abs(good), k, float(const), R, sig_R, None, [w,f*const]])\n except: pass\n \n best = sorted(fit_list)[0]\n best_unc = np.empty(len(best[-1][0]))\n best_unc.fill(np.sqrt(sum(np.array([photometry[i] for i in photometry if 'unc' in i and photometry[i]])**2)))\n final_spec = [best[-1][0]*q.um, best[-1][1]*q.erg/q.s/q.cm**2/q.AA, best_unc*q.erg/q.s/q.cm**2/q.AA]\n\n if plot:\n from itertools import groupby\n fig, to_print = plt.figure(), []\n for (ni,li) in [('fl',fit_list),('ul',unfit_list)]:\n for key,group in [[k,list(grp)] for k,grp in groupby(sorted(li, key=lambda x: x[1].split()[1]), lambda x: x[1].split()[1])]:\n g, p, c, r, l, sp = zip(*sorted(group, key=lambda x: int(x[1].split()[0])))\n plt.plot([int(t.split()[0]) for t in p], g, 'o' if ni=='fl' else 'x', ls='-' if ni=='fl' else 'none', color=plt.cm.jet((5.6-float(key))/2.4,1), label=key if ni=='fl' else None)\n to_print += zip(*[p,g,r,l])\n plt.legend(loc=0), plt.grid(True), plt.yscale('log'), plt.ylabel('Goodness of Fit'), plt.xlabel('Teff'), plt.suptitle(plot) \n if save: plt.savefig(save+' - fit plot.png')#, printer(['Params','G','radius','Lbol'], to_print, to_txt=save+' - fit.txt')\n\n return [best[1].split()[0], 1, best[1].split()[1], 1, best[1]]\n # return [final_spec, best[1], best[2], best[3], best[4], best[5]]", "def BestFitMapping(self) -> bool:", "def disable_fit(self, parameter):\n\n obj = self._model if parameter in self._model.fittingParameters \\\n else self._observed\n\n name, latex, fget, fset, mode, to_fit, bounds = \\\n obj.fittingParameters[parameter]\n\n to_fit = False\n\n obj.fittingParameters[parameter] = (\n name, latex, fget, fset, mode, to_fit, bounds)", "def prune(self):\n hypBest = self.getHypothesisByPosition(self.bestPosition)\n if hypBest.failed:\n log_error(\"[AllInOneCubePruner] failed to build 0,0,0 hyp !!!\")\n stack_output = []\n self.lastBestScore = 0\n else:\n stack_output = [hypBest]\n self.lastBestScore = hypBest.getScore()\n\n while len(stack_output) < self.size:\n # expand currently best lattice\n self.expandEdges()\n\n if self.needStop : break\n\n # pop the best hyp of edges to output\n hypBest = self.popBestHypothesis()\n self.lastBestScore = hypBest.getScore()\n stack_output.append( hypBest )\n\n stack_output.sort( key=lambda 
x:x.getScore() , reverse=True )\n return stack_output", "def test_plot_fit_not_implemented():\n plot_fit(display=False, fittype='not implemented')", "def compute_fit(self):\n self.z = np.polyfit(self.a, self.e, 2) # Getting the fit parameters\n self.f = np.poly1d(self.z) ## Getting the new function\n self.x_fit = np.linspace(self.a[0], self.a[-1], 100)\n self.y_fit = self.f(self.x_fit)\n\n # Similarly for the volume\n self.vz = np.polyfit(self.v, self.e, 2) # Getting the fit parameters\n self.vf = np.poly1d(self.vz) ## Getting the new function\n self.v_x_fit = np.linspace(self.v[0], self.v[-1], 100)\n self.v_y_fit = self.vf(self.v_x_fit)\n\n # Getting the minimum energy\n self.E_optimized = min(self.y_fit)\n self.E_optimized_printable = self.E_optimized.astype(np.float)\n\n # Getting the optimized lattice constant\n self.min_index = np.argmin(self.y_fit)\n self.a0_optimized = self.x_fit.flat[self.min_index]\n self.v0_optimized = self.v_x_fit.flat[self.min_index] # There are four primitive cells in a single conventional cell\n\n # Calculations\n # Getting the double derivative using a 2nd degree polynomial\n self.dda0 = 2*self.z[0]#.flat[0]\n self.ddv0 = 2*self.vz[0]#.flat[0]\n self.B = eVA32GPa(self.v0_optimized*self.ddv0) # 1 eV/Angstrom3 = 160.21766208 GPa", "def test_no_fit(self):\n p_max = 0.02\n freqs = np.asarray([random.randint(0, 20) for _ in range(100)])\n _, p = traces.zipf_fit(freqs)\n self.assertLessEqual(p, p_max)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert using time slices (top level function). Use the image im as a template. Do PSF in a separate call.
def invert_timeslice(vis: Visibility, im: Image, dopsf=False, normalize=True, **kwargs) -> (Image, numpy.ndarray):
    log.info("invert_timeslice: inverting using time slices")

    return invert_with_vis_iterator(vis, im, dopsf, vis_iter=vis_timeslice_iter,
                                    normalize=normalize, invert=invert_timeslice_single, **kwargs)
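A minimal usage sketch for the function above, assuming vis is an existing Visibility and model is a template Image from the same library (the variable names are illustrative, not taken from the source):

# Illustrative only: vis and model are assumed to exist already.
dirty, sumwt = invert_timeslice(vis, model, dopsf=False, normalize=True)
# The PSF is made in a separate call, as the docstring suggests.
psf, _ = invert_timeslice(vis, model, dopsf=True, normalize=True)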
[ "def scale_invert(raw_path, proc_path,height,width):\n \n im = Image.open(raw_path)\n \n # rescale\n raw_width, raw_height = im.size\n new_width = int(round(raw_width * (height / raw_height)))\n im = im.resize((new_width, height), Image.NEAREST)\n im_map = list(im.getdata())\n im_map = np.array(im_map)\n im_map = im_map.reshape(height, new_width).astype(np.uint8)\n\n # Invert and add background (black - 255) \n data = np.full((height, width - new_width + 1), 255)\n im_map = np.concatenate((im_map, data), axis=1)\n im_map = im_map[:, 0:width]\n im_map = (255 - im_map)\n im_map = im_map.astype(np.uint8)\n im = Image.fromarray(im_map)\n\n \n im.save(str(proc_path), \"png\")\n print(\"Processed image saved: \" + str(proc_path))", "def invert_intensity_image_filter(*args, **kwargs):\n import itk\n instance = itk.InvertIntensityImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def pv_slice_series_overlay():\n # Load PV info\n # path_filename_short = \"catalogs/m16_pv_vectors_2.reg\"; n_steps = 60\n path_filename_short = \"catalogs/m16_pv_vectors_3.reg\"; n_steps = 85\n path_info = pvdiagrams.linear_series_from_ds9(catalog.utils.search_for_file(path_filename_short), n_steps=n_steps)\n path_stub = os.path.split(path_filename_short)[-1].replace('.reg', '')\n pv_vel_lims = (8*kms, 35*kms)\n pv_vel_intervals = np.arange(16, 33, 2)\n\n # Load cubes\n img_stub = 'ciiAPEX'\n img_cube_obj = cube_utils.CubeData(get_map_filename(img_stub)).convert_to_kms()\n\n contour_stub = '12co32'\n contour_cube_obj = cube_utils.CubeData(get_map_filename(contour_stub)).convert_to_kms()\n\n # Reference image\n ref_vel_lims = (10*kms, 35*kms)\n ref_mom0 = img_cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_img = ref_mom0.to_value()\n ref_contour_mom0 = contour_cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_contour = reproject_interp((ref_contour_mom0.to_value(), ref_contour_mom0.wcs), ref_mom0.wcs, ref_mom0.shape, return_footprint=False)\n\n # Colors\n ref_img_cmap = 'Greys_r'\n ref_contour_cmap = 'magma_r'\n pv_img_cmap = 'plasma'\n pv_img_contours_color = 'k'\n pv_contour_cmap = 'cool'\n reg_color = 'LimeGreen'\n\n \"\"\"\n go thru and look at run_plot_and_save_series and plot_path in pvdiagrams.py\n will need to iterate somewhat manually using cues from these two functions\n \"\"\"\n\n # Colorscale limits\n pv_vmaxes = {'ciiAPEX': 20, '12co32': 30, '13co32': 15}\n pv_levels = {'ciiAPEX': (3, 37, 4), '12co32': (5, 41, 5), '13co32': (1, 27, 2.5)}\n def _get_levels(line_stub):\n \"\"\"\n Get levels from the above dictionary. 
Return None if not present.\n \"\"\"\n if line_stub in pv_levels:\n return np.arange(*pv_levels[line_stub])\n else:\n return None\n\n\n img_cube = img_cube_obj.data.spectral_slab(*pv_vel_lims)\n contour_cube = contour_cube_obj.data.spectral_slab(*pv_vel_lims)\n\n # path_info is: center_coord, length_scale, path_generator\n path_generator = path_info[2]\n for i, p in enumerate(path_generator):\n\n # if i%3 != 0 and i < 44:\n # if i != 14:\n\n if os.path.isfile(f\"/home/ramsey/Pictures/2023-06-13/m16_pv_{path_stub}_{i:03d}.png\"):\n continue\n\n sl_img = pvextractor.extract_pv_slice(img_cube, p)\n sl_contour_raw = pvextractor.extract_pv_slice(contour_cube, p)\n sl_contour_raw.header['RESTFRQ'] = sl_img.header['RESTFRQ']\n sl_wcs = WCS(sl_img.header)\n sl_contour = reproject_interp((sl_contour_raw.data, sl_contour_raw.header), sl_wcs, shape_out=sl_img.data.shape, return_footprint=False)\n\n fig = plt.figure(figsize=(10, 9))\n gs = fig.add_gridspec(2, 1, height_ratios=[1, 1])\n\n # Reference image\n ax_ref = fig.add_subplot(gs[0,0], projection=ref_mom0.wcs)\n cbar_ax = ax_ref.inset_axes([1, 0, 0.05, 1])\n cbar_ax2 = ax_ref.inset_axes([0, 1, 1, 0.05])\n\n im = ax_ref.imshow(ref_img, origin='lower', cmap=ref_img_cmap, vmin=0)\n cbar = fig.colorbar(im, cax=cbar_ax, label=f\"{get_data_name(img_stub)} ({ref_mom0.unit.to_string('latex_inline')})\")\n ax_ref.text(0.05, 0.93, make_vel_stub(ref_vel_lims), color='k', ha='left', va='bottom', transform=ax_ref.transAxes)\n\n cs = ax_ref.contour(ref_contour, cmap=ref_contour_cmap, linewidths=0.5, alpha=0.6)\n cbar = fig.colorbar(cs, cax=cbar_ax2, location='top', spacing='proportional', label=f\"{get_data_name(contour_stub)} ({ref_contour_mom0.unit.to_string('latex_inline')})\")\n\n ax_ref.plot([c.ra.deg for c in p._coords], [c.dec.deg for c in p._coords], color=reg_color, linestyle='-', lw=1, transform=ax_ref.get_transform('world'))\n ax_ref.text(p._coords[0].ra.deg, p._coords[0].dec.deg + 4*u.arcsec.to(u.deg), 'Offset = 0\\\"', color=reg_color, fontsize=10, va='center', ha='right', transform=ax_ref.get_transform('world'))\n\n # Plot the footprint of the overlay if it would be visible at all\n overlay_nan_map = np.isnan(ref_contour)\n if np.any(overlay_nan_map):\n ax_ref.contour(overlay_nan_map.astype(float), levels=[0.5], colors='SlateGray', linestyles=':', linewidths=1)\n del overlay_nan_map\n\n # Beams\n beam_patch_kwargs = dict(alpha=0.9, hatch='////')\n beam_x, beam_y = 0.93, 0.1\n beam_ecs = [['white', 'grey'], [cs.cmap(cs.norm(cs.levels[j])) for j in [0, 2]]]\n for j, cube in enumerate((img_cube, contour_cube)):\n # Beam is known, plot it\n patch = cube.beam.ellipse_to_plot(*(ax_ref.transAxes + ax_ref.transData.inverted()).transform([beam_x, beam_y]), misc_utils.get_pixel_scale(ref_mom0.wcs))\n patch.set(**beam_patch_kwargs, facecolor=beam_ecs[j][0], edgecolor=beam_ecs[j][1])\n ax_ref.add_artist(patch)\n beam_x -= 0.03\n\n\n # PV diagram\n ax_pv = fig.add_subplot(gs[1,0], projection=sl_wcs)\n cbar_ax = ax_pv.inset_axes([1, 0, 0.05, 1])\n # Image\n im = ax_pv.imshow(sl_img.data, origin='lower', cmap=pv_img_cmap, vmin=0, vmax=pv_vmaxes.get(img_stub, None), aspect=(sl_img.data.shape[1]/(2.5*sl_img.data.shape[0])))\n cbar = fig.colorbar(im, cax=cbar_ax, label=img_cube.unit.to_string('latex_inline'))\n # Contours\n cs = ax_pv.contour(sl_img.data, colors=pv_img_contours_color, linewidths=1, linestyles=':', levels=_get_levels(img_stub))\n for l in cs.levels:\n cbar.ax.axhline(l, color=pv_img_contours_color)\n cs = ax_pv.contour(sl_contour, 
cmap=pv_contour_cmap, linewidths=1.5, levels=_get_levels(contour_stub), vmax=pv_vmaxes.get(contour_stub, None))\n for l in cs.levels:\n cbar.ax.axhline(l, color=cs.cmap(cs.norm(l)))\n\n # Plot horizontal gridlines\n xlim = ax_pv.get_xlim() # save existing xlim to reintroduce them later\n x_length = p._coords[0].separation(p._coords[1]).deg\n for v in pv_vel_intervals: # these mess up the xlim\n ax_pv.plot([0, x_length], [v*1e3]*2, color='grey', alpha=0.7, linestyle='--', transform=ax_pv.get_transform('world'))\n # Label observation names\n ax_pv.text(0.05, 0.95, \"Image: \" + cube_utils.cubenames[img_stub], fontsize=13, color=marcs_colors[1], va='top', ha='left', transform=ax_pv.transAxes)\n ax_pv.text(0.05, 0.90, \"Contour: \" + cube_utils.cubenames[contour_stub], fontsize=13, color='w', va='top', ha='left', transform=ax_pv.transAxes)\n # Put xlim back in\n ax_pv.set_xlim(xlim)\n\n\n ax_pv.coords[1].set_format_unit(u.km/u.s)\n ax_pv.coords[1].set_major_formatter('x.xx')\n ax_pv.coords[0].set_format_unit(u.arcsec)\n ax_pv.coords[0].set_major_formatter('x.xx')\n\n plt.tight_layout()\n\n # 2023-06-12,13\n savename = f\"/home/ramsey/Pictures/2023-06-13/m16_pv_{path_stub}_{i:03d}.png\"\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title='pv movie',\n file=__file__, func='pv_slice_series_overlay'))\n\n plt.close(fig)", "def inverse_transform(self, matrix):\n #return np.fft.ifft(matrix) #just wanted to see what is to be expected\n sx = matrix.shape[0]\n sy = matrix.shape[1]\n N = max(matrix.shape[0], matrix.shape[1])\n newimage = np.zeros((sx,sy),dtype=np.complex)\n for u in range(sx):\n for v in range(sy):\n t = 0\n\n for i in range(sx):\n for j in range(sy):\n t = t + ((matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) - (\n ((1j) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j))))))))\n\n #t = t + (matrix[i,j]*math.exp((1j.imag)*((2*math.pi)/N)*((u*i) +(v*j))))\n\n #t = t + (matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) + (\n #(((1j).imag) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j)))))))\n\n newimage[u, v] = t #round(t)\n\n if (False):\n for u in range(sx):\n for v in range(sy):\n newimage[u,v] = math.floor(math.log(abs(newimage[u,v])))\n\n return newimage", "def _inverse_transform(self, i, x):\n inv_interp_func = interp1d(self.interp_func_[i].y, self.interp_func_[i].x, kind=self.interp_kind,\n copy=self.interp_copy, fill_value=self.fill_value)\n return inv_interp_func(erf(x))", "def invert(self):\n self.image = ImageOps.invert(self.image).convert(o.device_mode)\n self.display_if_interactive()", "def test_local_inversion(invertible_xform, to_invert, im, dict_key=None):\n im_item = im if dict_key is None else im[dict_key]\n if not isinstance(im_item, MetaTensor):\n return\n im_ref = copy.deepcopy(im)\n im_inv = invertible_xform.inverse(to_invert)\n if dict_key:\n im_inv = im_inv[dict_key]\n im_ref = im_ref[dict_key]\n np.testing.assert_array_equal(im_inv.applied_operations, [])\n assert_allclose(im_inv.shape, im_ref.shape)\n assert_allclose(im_inv.affine, im_ref.affine, atol=1e-3, rtol=1e-3)", "def cycle_frames_overlay(bg_img, img_vols, time_axis=-1, anim_kw=dict(interval=50, blit=True),\n imshow_kw={}, alpha_image=None):\n ndim = img_vols.ndim\n if ndim < 3 or ndim > 5:\n raise ValueError(\"input data must be 3D, 4D or 5D\")\n if ndim < 5:\n montage_func = montager\n elif ndim == 5:\n montage_func = montager4d\n\n slices = [slice(None), ] * img_vols.ndim\n\n fig = plt.figure()\n fig.patch.set_visible = False\n\n frame = 
0\n if 'cmap' not in imshow_kw:\n imshow_kw['cmap'] = plt.get_cmap('gray')\n slices[time_axis] = frame\n nframes = img_vols.shape[-1]\n im = plt.imshow(montage_func(img_vols[slices]),\n **imshow_kw)\n plt.axis('off')\n im.axes.set_visible = False\n\n def updatefig(frame, *args):\n frame = frame % nframes\n slices[time_axis] = frame\n im.set_array(montage_func(img_vols[slices]))\n return im,\n\n ani = animation.FuncAnimation(fig, updatefig, **anim_kw)\n plt.show()\n return ani", "def rolling_shutter(img):\n pass", "def inverse_transform(r,t):\n r = r.transpose()\n t = - r*t\n return r,t", "def manual_pv_slice_series():\n\n \"\"\"\n PV cut orientation, vertical or horizontal\n Vertical means slice at a single RA and plot velocity vs Dec\n Horizontal means slice at a single Dec and plot velocity vs RA\n \"\"\"\n orientation = 'horizontal'\n start_idx, step_idx = 25, 50\n\n # Load cube\n line_stub = 'cii'\n if line_stub in large_map_filenames:\n # Use the custom filename rather than the default\n filename = large_map_filenames[line_stub]\n else:\n # Use default filename from cube_utils (many of these are centered around Pillars)\n filename = line_stub\n cube_obj = cube_utils.CubeData(filename).convert_to_K().convert_to_kms()\n dimension_size = (cube_obj.data.shape[2] if orientation=='vertical' else cube_obj.data.shape[1])\n\n # Make image\n ref_vel_lims = (10*kms, 35*kms)\n ref_mom0 = cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_img = ref_mom0.to_value()\n\n # Set colors\n pv_cmap = 'plasma'\n img_cmap = 'Greys_r'\n line_color = marcs_colors[1]\n\n # Loop thru slice index\n for slice_idx in range(start_idx, dimension_size, step_idx):\n\n if orientation == 'vertical':\n # Cube index order is V,Y,X = Velocity,Dec,RA = V,I,J\n cube_slices = (slice(None), slice(None), slice_idx)\n else:\n cube_slices = (slice(None), slice_idx, slice(None))\n\n pv_slice = cube_obj.data[cube_slices]\n\n # First try to remake fig/axes each time. 
Try persistent if slow\n fig = plt.figure(figsize=(8, 10))\n gs = fig.add_gridspec(2, 1)\n ax_img = fig.add_subplot(gs[0,0], projection=cube_obj.wcs_flat)\n ax_pv = fig.add_subplot(gs[1,0], projection=pv_slice.wcs)\n\n im = ax_img.imshow(ref_img, origin='lower', vmin=0, cmap=img_cmap)\n fig.colorbar(im, ax=ax_img, label=ref_mom0.unit.to_string('latex_inline'))\n\n im = ax_pv.imshow(pv_slice.to_value(), origin='lower', vmin=0, cmap=pv_cmap)\n fig.colorbar(im, ax=ax_pv, label=pv_slice.unit.to_string('latex_inline'), orientation='horizontal')\n\n # Plot line\n if orientation == 'vertical':\n plot_line = ax_img.axvline\n else:\n plot_line = ax_img.axhline\n plot_line(slice_idx, color=line_color, linewidth=2)\n # Reference image velocity interval stamp\n ax_img.text(0.1, 0.9, make_vel_stub(ref_vel_lims), color=line_color, ha='left', va='bottom')\n\n # Clean up axes labels\n # ax_img.set_xlabel(\"RA\")\n # ax_img.set_ylabel(\"Dec\")\n ax_pv.coords[1].set_format_unit(kms)\n # 2023-04-26, 06-07\n savename = f\"/home/ramsey/Pictures/2023-04-26/m16_pv_{orientation}_{slice_idx:03d}.png\"\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title=f'{line_stub}, using stub/file {filename}', file=__file__, func='manual_pv_slice_series'))", "def cycle_frames(img_vols, time_axis=-1, anim_kw=dict(interval=50, blit=True),\n imshow_kw={}):\n ndim = img_vols.ndim\n if ndim < 3 or ndim > 5:\n raise ValueError(\"input data must be 3D, 4D or 5D\")\n if ndim < 5:\n montage_func = montager\n elif ndim == 5:\n montage_func = montager4d\n\n slices = [slice(None), ] * img_vols.ndim\n\n fig = plt.figure()\n fig.patch.set_visible = False\n\n frame = 0\n if 'cmap' not in imshow_kw:\n imshow_kw['cmap'] = plt.get_cmap('gray')\n slices[time_axis] = frame\n nframes = img_vols.shape[-1]\n im = plt.imshow(montage_func(img_vols[slices]),\n **imshow_kw)\n plt.axis('off')\n im.axes.set_visible = False\n\n def updatefig(frame, *args):\n frame = frame % nframes\n slices[time_axis] = frame\n im.set_array(montage_func(img_vols[slices]))\n return im,\n\n ani = animation.FuncAnimation(fig, updatefig, **anim_kw)\n plt.show()\n return ani", "def main():\n # Import a image\n original_mt = SimpleImage('images/mt-rainier.jpg')\n # Show the original image\n original_mt.show()\n reflected = reflect('images/mt-rainier.jpg')\n # Show the vertically mirrored image\n reflected.show()", "def flip_vertical(image: Image) -> Image:\n \n\n flipped_image = copy(image)\n width = get_width(image)\n \n for y in range(get_height(flipped_image)):\n for x in range(get_width(flipped_image)):\n new_color = get_color(image, width - y - 1, y)\n set_color(flipped_image, width - y- 1, y, new_color)\n \n show(flipped_image) \n return flipped_image", "def inverse(im):\n return 255 - im", "def inv(transform):\n\n R = transform[0:3, 0:3]\n t = transform[0:3, 3]\n t_inv = -1 * R.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = R.T\n transform_inv[0:3, 3] = t_inv\n return transform_inv", "def resample(self, shape_matrix:tuple = (256, 256, 1024), shape_physic=(700, 700, 2000)) -> np.ndarray:\n spacing = self.nifti_img.GetSpacing()\n origin = self.nifti_img.GetOrigin()\n direction = self.nifti_img.GetDirection()\n size = self.nifti_img.GetSize()\n #target spacing, and size\n spacing_x = shape_physic[0]/shape_matrix[0] #mm\n spacing_y = shape_physic[1]/shape_matrix[1] #mm \n spacing_z = shape_physic[2]/shape_matrix[2] #mm\n\n true_x = size[0] * spacing[0] #mm\n true_y = size[1] * spacing[1] #mm \n true_z = size[2] * spacing[2] #mm\n\n 
new_size_x = int((true_x * shape_matrix[0]) / shape_physic[0]) #pixel\n new_size_y = int((true_y * shape_matrix[1]) / shape_physic[1]) #pixel\n new_size_z = int((true_z * shape_matrix[2]) / shape_physic[2]) #pixel\n\n #applied transformation\n transformation = sitk.ResampleImageFilter()\n transformation.SetOutputDirection(direction)\n transformation.SetOutputOrigin(origin)\n transformation.SetSize((new_size_x, new_size_y, new_size_z))\n transformation.SetOutputSpacing((spacing_x, spacing_y, spacing_z))\n transformation.SetInterpolator(sitk.sitkLinear)\n new_img = transformation.Execute(self.nifti_img) \n result = sitk.GetArrayFromImage(new_img) #[z,y,x]\n center = [int(shape_matrix[2]/2), int(shape_matrix[1]/2), int(shape_matrix[1]/2)]\n z = int(result.shape[0]/2)\n y = int(result.shape[1]/2)\n x = int(result.shape[2]/2)\n sommet_x = center[2] - x \n sommet_y = center[1] - y \n sommet_z = center[0] - z\n new_array = np.zeros((shape_matrix[2], shape_matrix[1], shape_matrix[0]))\n if result.shape[1] != shape_matrix[1] : \n new_array[sommet_z:sommet_z+result.shape[0], sommet_y:sommet_y + result.shape[1], sommet_x:sommet_x + result.shape[2]] = result\n else : \n new_array[sommet_z:sommet_z+result.shape[0],0:shape_matrix[1], 0:shape_matrix[0]] = result\n return new_array", "def trans_image(image,steer,trans_range, trans_y=False):\n\t\n\trows, cols, chan = image.shape\n\t\n\t# horizontal translation with 0.008 steering compensation per pixel\n\ttr_x = trans_range*np.random.uniform()-trans_range/2\n\tsteer_ang = steer + tr_x/trans_range*.4\n\t\n\t# option to disable vertical translation (vertical translation not necessary)\n\tif trans_y:\n\t\ttr_y = 40*np.random.uniform()-40/2\n\telse:\n\t\ttr_y = 0\n\t\n\tTrans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])\n\timage_tr = cv2.warpAffine(image,Trans_M,(cols,rows))\n\t\n\treturn image_tr,steer_ang", "def _invert_pv(self):\n # Compute wind from vorticity using div = 0\n vorticity = self.vorticity_spectral\n self._u, self._v = self.grid.wind(vorticity, np.zeros_like(vorticity))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict using time slices.
def predict_timeslice(vis: Visibility, model: Image, **kwargs) -> Visibility:
    log.info("predict_timeslice: predicting using time slices")

    return predict_with_vis_iterator(vis, model, vis_iter=vis_timeslice_iter,
                                     predict=predict_timeslice_single, **kwargs)
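A hedged usage sketch of the function above, assuming vis and model are a pre-built Visibility and model Image; the call fills the visibility with values predicted slice by slice in time:

# Illustrative only: relies solely on the signature shown above.
vis = predict_timeslice(vis, model)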
[ "def predict(model, ts_test):\r\n n_periods = ts_test.shape[0]\r\n df_dates = model.make_future_dataframe(periods=n_periods, include_history=False)\r\n model_prediction = model.predict(df_dates)\r\n y_pred = model_prediction[['ds', 'yhat']]\r\n y_pred = y_pred.set_index('ds')\r\n y_pred['yhat'] = y_pred['yhat']\r\n return y_pred['yhat']", "def fit_predict(self, test_time_range):\n\t\t#Date boundaries for test\n\t\tfirst_date = test_time_range[0].date()\n\t\tlast_date = test_time_range[-1].date()\n\t\tn_days = (last_date - first_date).days + 1 #retrive days attribute from timedelta\n\t\t#Empty arrays to store forecast\n\t\tforecast_length = n_days * self._cycle_length #Length of forecast vectors\n\t\tdaily_seasonal_forecast = empty(forecast_length)\n\t\tweekly_seasonal_forecast = empty(forecast_length)\n\t\tdeseasonalized_forecast = empty(forecast_length)\n\t\t#Align linked list with beginning of test\n\t\ttrav = self._patterns._tail \n\t\twhile trav._date != first_date:\n\t\t\ttrav = trav._prev \n\t\t#Forecast routine\n\t\tk = 0 #Forecast day counter\n\t\twhile trav and trav._date != last_date + self._cycle_length * Timedelta(1, 'H'): #Traverse patterns day by day\n\t\t\tprint(\"Forecasting day \", k+1, \" of \", n_days) #Progress\n\t\t\t#deseasonalized component forecast\n\t\t\tdeseasonalized_comp = empty(self._cycle_length) #Empty vector to store values\n\t\t\tfor t in range(self._cycle_length):\n\t\t\t\telement = self._point_deseasonalized_forecast(trav, t)\n\t\t\t\tdeseasonalized_forecast[k*self._cycle_length + t] = element\t\n\t\t\tdaily_seasonal_forecast[k * self._cycle_length: (k+1) * self._cycle_length] = self._season_forecast(trav, 24) #First seasonal\tcomponent\n\t\t\tweekly_seasonal_forecast[k * self._cycle_length: (k+1) * self._cycle_length] = self._season_forecast(trav, 168) #Second seasonalcomponent\t\n\t\t\ttrav = trav._next #Move to next day\n\t\t\tk = k + 1 #Increase forecast day counter\n\t\t#Store predicitions in model - convert to pandas Series\n\t\tself._full_forecast = Series(deseasonalized_forecast + weekly_seasonal_forecast + daily_seasonal_forecast, index=test_time_range)\n\t\tself._deseasonalized_forecast = Series(deseasonalized_forecast, index=test_time_range)\n\t\tself._daily_seasonal_forecast = Series(daily_seasonal_forecast, index=test_time_range)\n\t\tself._weekly_seasonal_forecast = Series(weekly_seasonal_forecast, index=test_time_range)\n\n\t\treturn self._full_forecast", "def predict(data_prev, model):\r\n timesteps = len(data_prev)\r\n n_features = data_prev[0].size\r\n pred = p_model.predict(np.array(data_prev).reshape(1,timesteps,n_features)).reshape(-1)\r\n return pred", "def test_auto_predict_unit_timing_predict(self) -> None:\n my_module = torch.nn.Linear(2, 2)\n\n input_dim = 2\n dataset_len = 8\n batch_size = 2\n\n predict_dl = generate_random_iterable_dataloader(\n dataset_len, input_dim, batch_size\n )\n predict(\n TimingAutoPredictUnit(module=my_module),\n predict_dl,\n max_steps_per_epoch=1,\n timer=Timer(),\n )", "def predict(self, target_period=None, dataset_begin=None, dataset_end=None, debug=False):\n raise NotImplementedError('Not Implemented')", "def predict(model, X_test):", "def predict(self, data: arr_t, **kwargs) -> SleepWakeDataFrame:\n return _SleepWakeDataFrame(self.sleep_wake_algo.predict(data, **kwargs))", "def predict(self, ts, cat=None, encoding=\"utf-8\", num_samples=100, quantiles=[\"0.1\", \"0.5\", \"0.9\"], content_type=\"application/json\"):\n \n prediction_times=[]\n req=[]\n if type(ts)==list:\n prediction_times = 
[x.index[-1]+pd.Timedelta(1, unit=self.freq) for x in ts]\n req = self.__encode_request(ts, cat, encoding, num_samples, quantiles)\n elif type(ts)==dict:\n \n prediction_times=[]\n target_len=len(ts['target'])\n t0=ts['start']\n prediction_times.append(t0)\n \n \n \n req={\n 'instances': [ts],\n 'configuration': {\"num_samples\": 100, \"output_types\": [\"quantiles\"], \"quantiles\": [\"0.5\"]}#[\"0.1\", \"0.5\", \"0.9\"]}\n }\n req=json.dumps(req).encode('utf-8')\n elif type(ts)==bytes:\n prediction_times=[]\n req=ts\n print(\"IN HERE\")\n prediction_times.append(json.loads(req)['instances'][0]['start'])\n \n res = super(DeepARPredictor, self).predict(req, initial_args={\"ContentType\": content_type})\n \n return self.__decode_response(res, prediction_times, encoding)", "def predict(self, inputs, initial_state):\n\n # Compute time cell value\n output, state = self.cell(inputs, initial_state)\n\n return output, state", "def ss_Predict(self):\n \n self.ukf.predict() \n self.forecasts.append(self.ukf.x)\n self.base_model.step()\n self.truths.append(self.base_model.get_state(sensor=\"location\"))", "def predict_soil_moisture(current_date, array_of_values):\n # future_date = pd.to_datetime(current_date)\n future_date = current_date + timedelta(days=14)\n prediction = drought_SVR.predict(array_of_values)[0]\n return future_date, prediction", "def get_model_predictions_by_day(date):", "def get_predictive_travel_time(trip_id, req):\n\n # Get ssid array\n orig_ssid_array, dest_ssid_array = get_SSID_array(trip_id, req)\n\n # Get weather info\n rain, wind_speed = get_weather_info(req)\n\n # Prepare input parameter for model as dataframe\n in_df = pd.DataFrame({'WindSpeed': wind_speed, 'Rain': rain,\n 'Day': req['day'], 'HourFrame': int(req['time'].split(':')[0])}, index=[0])\n\n # Get predictive time of each SSID and sum up (unit: sec)\n # 0808 revised: return travling time of each segment and arrival time of each stop\n sum_travel_time = 0\n depart_orig_travel_time = 0\n orig_dest_travel_time = 0\n travel_time_list = []\n flag = False\n\n for ssid in dest_ssid_array:\n\n # Prepare dataframe to predict\n feature = pd.read_csv('SSID_model_features.csv')\n feature = feature.rename(columns={'14781479': 'SSID'})\n feature['SSID'] = feature['SSID'].apply(lambda x: str(x).zfill(8))\n frame = feature[feature.SSID == ssid]\n frame.dropna(axis=1, how='all', inplace=True)\n frame.drop(['SSID'], axis=1, inplace=True)\n frame.reset_index(drop=True, inplace=True)\n frame.columns = frame.iloc[0]\n frame.drop(0, axis=0, inplace=True)\n\n # Change data type\n set_to_zero = []\n frame['Rain'] = frame['Rain'].astype('float32')\n #frame['Rain'] = frame['WindSpeed'].astype('float32')\n frame['WindSpeed'] = frame['WindSpeed'].astype('float32')\n frame['JPID_length'] = frame['JPID_length'].astype(str).astype(int)\n for c in frame.columns:\n if c.find('HF') != -1 or c.find('Day') != -1 or c.find('SchoolHoliday') != -1:\n frame[c] = frame[c].apply(lambda x: int(float(x)))\n\n # Set value for input dataframe\n frame.set_value(index=1, col='Rain', value=rain)\n frame.set_value(index=1, col='WindSpeed', value=wind_speed)\n frame.set_value(index=1, col='HF_' + req['time'].split(':')[0], value=1)\n frame.set_value(index=1, col='Day_' + req['day'], value=1)\n\n # Get pickle file\n model = joblib.load('./SSID_XXXX_model_pkls/' + ssid + '.pkl')\n\n travel_time = model.predict(frame)[0]\n sum_travel_time += travel_time\n\n if flag:\n travel_time_list.append(travel_time)\n\n if ssid == orig_ssid_array[-1]:\n flag = True\n 
depart_orig_travel_time = sum_travel_time\n\n orig_dest_travel_time = sum_travel_time - depart_orig_travel_time\n\n return depart_orig_travel_time, orig_dest_travel_time, sum_travel_time, travel_time_list", "def get_predictive_timetable(req):\n\n # Get available trip_id\n trip_id_list = get_trip_id(req)\n all_orig_time_list = []\n\n # For each trip_id in list\n for trip_id in trip_id_list:\n\n # Get departure timetable list\n depart_times = get_timetable(trip_id, req)\n\n # Get predictive traveling time\n depart_orig_tt, orig_dest_tt, sum_tt, tt_list = get_predictive_travel_time(trip_id, req)\n\n # Request time: convert to \"datetime\"\n req_t = datetime.datetime.strptime(req['time'], \"%H:%M\")\n\n # Get ideal depart time = request time - predictive travel time\n ideal_depart_t = (req_t - datetime.timedelta(seconds=sum_tt)).time()\n\n # Store actual depart time base on timetales\n actual_depart_t = \"\"\n\n # Get recommend schedule time of previous, recommand and next\n for i in range(len(depart_times)):\n\n # Depart time: convert to \"datetime.time\"\n depart_t = datetime.datetime.strptime(depart_times[i], \"%H:%M:%S\").time()\n diff = datetime.datetime.strptime(depart_times[i], \"%H:%M:%S\") - (\n req_t - datetime.timedelta(seconds=sum_tt))\n\n # First time that is larger than idea time but difference cannot over 4 hours\n if (depart_t >= ideal_depart_t):\n # Consider first and last bus of that day.\n actual_depart_t = depart_times[i]\n break\n\n if actual_depart_t != \"\":\n at = (\n datetime.datetime.strptime(actual_depart_t, \"%H:%M:%S\") + datetime.timedelta(seconds=depart_orig_tt)).time()\n all_orig_time_list.append(at.strftime(\"%H:%M:%S\"))\n\n # Sort by time and pick the cloest time\n all_orig_time_list.sort()\n predictive_timetable_orig = all_orig_time_list[0]\n\n # Get all arrival time of stops passed by\n predictive_timetable_all = []\n acc_tt = predictive_timetable_orig\n predictive_timetable_all.append(acc_tt)\n\n for tt in tt_list:\n temp = (datetime.datetime.strptime(acc_tt, \"%H:%M:%S\") + datetime.timedelta(seconds=tt)).time()\n acc_tt = temp.strftime(\"%H:%M:%S\")\n predictive_timetable_all.append(acc_tt)\n\n return predictive_timetable_all, orig_dest_tt", "def _predict(self, t_fin,g_std, l_0 = None, t_0 = 0, dt = 3.,seed=None):\n t_fin+=1000*dt#ensure to be in stationary phase\n if seed is not None: np.random.seed(seed)\n l_0 = l_0 if l_0 is not None else self.mu #initialize at the mean\n t = np.arange(t_0, t_fin+dt ,dt)\n dw = self._dW(dt,len(t))\n for i,j in enumerate(t):\n if i == 0:\n x = np.array([l_0])\n else:\n x = np.append(x, x[i-1] + self._mu(x[i-1])*dt +\\\n self.sigma*dw[i-1])\n t=t[1000:]-t[1000]; x=x[1000:]#cancel the added 100 points\n xnoise = np.random.normal(loc=x ,scale = g_std*np.ones_like(x))\n return t, x, xnoise", "def predict_survival(self, t, is_lagged = False):\r\n return self.model.predict_survival(t, is_lagged)", "def predict(self):\n model_config = ModelConfigRegistry().get_model(self.model_name)\n if (\n self.model_name\n and not model_config.can_use_external_feature()\n and TIMESERIES_KEYS.FEAT_DYNAMIC_REAL in self.gluon_dataset.list_data[0]\n ):\n # remove external features from the ListDataset used for predictions if the model cannot use them\n gluon_dataset_without_external_features = remove_unused_external_features(\n self.gluon_dataset, self.frequency\n )\n forecasts = self.predictor.predict(gluon_dataset_without_external_features)\n else:\n forecasts = self.predictor.predict(self.gluon_dataset)\n\n forecasts_list = 
list(forecasts)\n\n forecasts_timeseries = self._compute_forecasts_timeseries(forecasts_list)\n\n multiple_df = concat_timeseries_per_identifiers(forecasts_timeseries)\n\n self.forecasts_df = concat_all_timeseries(multiple_df)\n\n self.time_column_name = self.gluon_dataset.list_data[0][TIMESERIES_KEYS.TIME_COLUMN_NAME]\n self.identifiers_columns = (\n list(self.gluon_dataset.list_data[0][TIMESERIES_KEYS.IDENTIFIERS].keys())\n if TIMESERIES_KEYS.IDENTIFIERS in self.gluon_dataset.list_data[0]\n else []\n )\n\n if self.include_history:\n self.forecasts_df = self._include_history(self.frequency, history_length_limit=self.history_length_limit)\n\n self.forecasts_df = add_row_origin(self.forecasts_df, both=ROW_ORIGIN.FORECAST, left_only=ROW_ORIGIN.HISTORY)\n\n self.forecasts_df = self.forecasts_df.rename(columns={\"index\": self.time_column_name})", "def run_to_predict_thermal_loads(self, weather_pred, output_horizon_in_h, output_resolution_in_s, t_act, actual_time, start_datetime, start_sim_inh, end_sim_inh, fnr):\n t_a_min = 200.0\n thermal_load_1 = []\n thermal_load_prediction = []\n horizont_time = actual_time + timedelta(hours=output_horizon_in_h)\n act_time_1 = actual_time\n act_time_2 = actual_time\n #while(actual_time <= horizont_time):\n n_steps = int(output_horizon_in_h // self.dt)\n #print('actual_time = {}; horizont = {} h ; Zeitschritt = {} h ; n_steps = {}'.format(actual_time, horizont_time, self.dt, n_steps))\n for step in range(n_steps):\n # read ambient temperature from the weather file\n act_time_1 = act_time_1 + timedelta(hours=self.dt)\n simtime_in_h = ((act_time_1 - start_datetime).total_seconds() / 3600.0) + start_sim_inh # simulationstime in h\n day = t_act // 24 # number of the day\n t_day = t_act - day * 24 # hour of the day\n # add value to array\n #cd = int(t_day / self.dt) # counter time in day\n # create output list\n t_day = t_day + step * self.dt\n if (t_day >= 24.0):\n t_day = t_day - 24.0\n cd = int(t_day / self.dt) # counter time in day\n # calculate average value from past days\n q_m = 0\n t_e_m = 0\n for i in range(0,self.n_day):\n q_m = q_m + self.q_s[i][cd]\n t_e_m = t_e_m + self.t_e_s[i][cd]\n q_m = q_m / self.n_day\n t_e_m = t_e_m / self.n_day\n\n # calculate parameters of linear curve\n a_1 = 0 # parameter\n a_2 = 0\n for i in range(0,self.n_day):\n a_1 = a_1 + ((self.t_e_s[i][cd] - t_e_m) * (self.q_s[i][cd] - q_m))\n a_2 = a_2 + (self.t_e_s[i][cd] - t_e_m) ** 2\n try: # prevent ZeroDivision\n m_c = a_1 / a_2 # slope of curve\n except:\n m_c = 0\n n_c = q_m - m_c * t_e_m # absolute element curve\n\n # determine ambient temperature from weather prediction\n #t_e_1day = utils.interpolate_value_from_list_of_dicts(value1, tag_of_val1, list_of_dicts, tag_of_result)\n t_e_1day = utils.interpolate_value_from_list_of_dicts(simtime_in_h, 'time_in_h', weather_pred, 'temp_in_C')\n\n # calculate predicted heat values\n q_1day = m_c * t_e_1day + n_c\n \n q_pred = q_1day\n thermal_load_1.append({'date':act_time_1, 'time_in_h': simtime_in_h, 'q_in_W': q_pred})\n \n #n_steps = int(output_horizon_in_h * 3600.0 // output_resolution_in_s)\n #n_day = int(output_horizon_in_h // self.n_values)\n #n_steps = self.n_values * n_day\n n_steps = int((output_horizon_in_h * 3600.0) // output_resolution_in_s)\n for step in range(n_steps):\n act_time_2 = act_time_2 + timedelta(seconds=output_resolution_in_s)\n simtime_in_h = ((act_time_2 - start_datetime).total_seconds() / 3600.0) + start_sim_inh # simulationstime in h\n q_pred = utils.interpolate_value_from_list_of_dicts(simtime_in_h, 
'time_in_h', thermal_load_1, 'q_in_W')\n thermal_load_prediction.append({'date':act_time_2, 'time_in_h': simtime_in_h, 'q_in_W': q_pred})\n #thermal_load_prediction.append({act_time_2: q_pred})\n #print('actual_time = {}; simtime in h = {}; q_pred = {}'.format(act_time_2, simtime_in_h, q_pred))\n if(self.dbg > 0):\n H = open(\"./thermal_pred_\"+str(fnr)+\".dat\",\"w\")\n H.write(' original \\n')\n H.write(' index ; time stamp ; time in h ; thermal load in W\\n')\n for idx,elem in enumerate(thermal_load_1):\n H.write('{} {} {:11.3f} {:9.2f}\\n'.format(idx,elem['date'],elem['time_in_h'],elem['q_in_W']))\n H.write(' adapted \\n')\n H.write(' index ; time stamp ; time in h ; thermal load in W\\n')\n for idx,elem in enumerate(thermal_load_prediction):\n H.write('{} {} {:11.3f} {:9.2f}\\n'.format(idx,elem['date'],elem['time_in_h'],elem['q_in_W']))\n H.close()\n return thermal_load_prediction\n # end run_to_predict_thermal_loads", "def test_predict():\n\t\n\t# Create a row of data and run prediction.\n\thome = 'Arsenal'\n\taway = 'Chelsea'\n\tstats = pd.read_sql_query(\"select * from stats;\", engine)\n\tmodel = joblib.load('./model.pkl')\n\tresult = prediction.prediction(home, away, stats, model)\n\n\t# Check type of output.\n\tassert isinstance(result, np.ndarray)\n\n\t# Check array length.\n\tassert len(result) == 3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict using a single time slice. This fits a single plane and corrects the image geometry.
def predict_timeslice_single(vis: Visibility, model: Image, predict=predict_2d_base, **kwargs) -> Visibility:
    log.debug("predict_timeslice_single: predicting using single time slice")

    inchan, inpol, ny, nx = model.shape

    vis.data['vis'] *= 0.0

    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    # Fit and remove best fitting plane for this slice
    avis, p, q = fit_uvwplane(avis, remove=False)

    # Calculate nominal and distorted coordinate systems. We will convert the model
    # from nominal to distorted before predicting.
    workimage = copy_image(model)

    # Use griddata to do the conversion. This could be improved. Only cubic is possible in griddata.
    # The interpolation is ok for invert since the image is smooth but for clean images the
    # interpolation is particularly poor, leading to speckle in the residual image.
    lnominal, mnominal, ldistorted, mdistorted = lm_distortion(model, -p, -q)
    for chan in range(inchan):
        for pol in range(inpol):
            workimage.data[chan, pol, ...] = \
                griddata((mnominal.flatten(), lnominal.flatten()),
                         values=workimage.data[chan, pol, ...].flatten(),
                         xi=(mdistorted.flatten(), ldistorted.flatten()),
                         method='cubic',
                         fill_value=0.0,
                         rescale=True).reshape(workimage.data[chan, pol, ...].shape)

    vis = predict(vis, workimage, **kwargs)

    return vis
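For illustration, a hedged sketch of how this per-slice function is typically driven. It is the predict callback handed to the iterator by predict_timeslice above, but it can also be applied directly to a single coalesced time slice (vis_slice is an assumed variable name):

# Illustrative only: vis_slice stands for one time slice of visibilities.
vis_slice = predict_timeslice_single(vis_slice, model, predict=predict_2d_base)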
[ "def predict_timeslice(vis: Visibility, model: Image, **kwargs) -> Visibility:\n log.info(\"predict_timeslice: predicting using time slices\")\n\n return predict_with_vis_iterator(vis, model, vis_iter=vis_timeslice_iter,\n predict=predict_timeslice_single, **kwargs)", "def predict(data_prev, model):\r\n timesteps = len(data_prev)\r\n n_features = data_prev[0].size\r\n pred = p_model.predict(np.array(data_prev).reshape(1,timesteps,n_features)).reshape(-1)\r\n return pred", "def predict_single():\n path = 'outputs/gray/img-8-epoch-29.jpg'\n img = Image.open(path)\n img = img.resize((224,224))\n img_original = np.array(img)\n\n gray = rgb2gray(img_original)\n x = TF.to_tensor(gray).float()\n x.unsqueeze_(0)\n model = ColorizationUpsampling()\n model.load_state_dict(torch.load('checkpoints/model-epoch-22-losses-0.002910.pth',\n map_location=torch.device('cpu')))\n\n output = model(x)\n\n output = output.detach()\n color_image = torch.cat((x[0], output[0]), 0).numpy()\n color_image = color_image.transpose((1, 2, 0)) # rescale for matplotlib\n color_image[:, :, 0:1] = color_image[:, :, 0:1] * 100\n color_image[:, :, 1:3] = color_image[:, :, 1:3] * 255 - 128\n color_image = lab2rgb(color_image.astype(np.float16))\n\n color_image_bgr = color_image.astype(np.float32)\n color_image_bgr = cv2.cvtColor(color_image_bgr, cv2.COLOR_RGB2BGR)\n color_image_bgr = cv2.resize(color_image_bgr, (380, 240))\n\n normalized_array = (color_image_bgr - np.min(color_image_bgr)) / (\n np.max(color_image_bgr) - np.min(color_image_bgr)) # this set the range from 0 till 1\n color_image_bgr = (normalized_array * 255).astype(np.uint8)\n gray = cv2.resize(gray, (380, 240))\n gray = np.stack((gray,) * 3, axis=-1)\n\n gray = (gray - np.min(gray)) / (\n np.max(gray) - np.min(gray)) # this set the range from 0 till 1\n gray = (gray * 255).astype(np.uint8)\n vis = np.concatenate((gray, color_image_bgr), axis=1)\n\n frame_normed = np.array(vis, np.uint8)\n\n cv2.imwrite(path[:-4]+\"out.jpg\", frame_normed)\n cv2.imshow(\"out\", frame_normed)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def predict_task(success_only=False):\n controller, aircraft, task, uid, success = read_data()\n if success_only:\n controller = [c for c, s in zip(controller, success) if s]\n task = [t for t, s in zip(task, success) if s]\n\n # for i in range(len(controller)):\n # save_path = '/media/volume/sh/DTW_MDS/plot/'+str(i+1)+'.png'\n # plot_controller(controller[i],save_path)\n\n # L = []\n # for i in range(len(controller)):\n # l = len(controller[i])\n # if l>6000:\n # continue\n # L.append(l)\n # plt.hist(L, bins=30, facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n # plt.xlabel(\"Length of controller series\")\n # plt.ylabel(\"Frequency\")\n # # plt.title(\"The overall distribution of the length of controller series\")\n # plt.savefig('/media/volume/sh/DTW_MDS/data2.png')\n # plt.show()\n\n distance_matrix = apply_dtw(controller)\n # distance_matrix = compute_eud_distance(controller) #compute euclidean distance\n\n start = time.time()\n # features = MDS(n_components=8, dissimilarity=\"precomputed\").fit_transform(distance_matrix)\n features = PCA(n_components=12).fit_transform(distance_matrix)\n end= time.time()\n print(end-start)\n\n features, labels = SMOTE(k_neighbors=2).fit_resample(features, task)\n\n model=Model()\n\n # svm_pre = np.mean(split_cross_validation(model.svc, features, labels, success, 5))\n svm_pre = np.mean(cross_val_score(model.svc, features, labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with SVM: 
{:.3f}\".format(svm_pre))\n\n # knn_pre = np.mean(split_cross_validation(model.knn, features, labels, success, 5))\n knn_pre = np.mean(cross_val_score(model.knn, features, labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with KNN: {:.3f}\".format(knn_pre))\n\n # logreg_pre = np.mean(split_cross_validation(model.logreg, preprocessing.scale(features), labels, success, 5))\n logreg_pre = np.mean(cross_val_score(model.logreg, preprocessing.scale(features), labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with Log: {:.3f}\".format(logreg_pre))\n\n # gnb_pre = np.mean(split_cross_validation(model.gnb, features, labels, success, 5))\n gnb_pre = np.mean(cross_val_score(model.gnb, features, labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with GaussanNB: {:.3f}\".format(gnb_pre))\n\n # rf_pre = np.mean(split_cross_validation(model.rf, features, labels, success, 5))\n rf_pre = np.mean(cross_val_score(model.rf, features, labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with RF: {:.3f}\".format(rf_pre))\n\n # lda_pre = np.mean(split_cross_validation(model.lda, features, labels, success, 5))\n lda_pre = np.mean(cross_val_score(model.lda, features, labels, scoring=\"accuracy\", cv=5, n_jobs=5))\n print(\"Predict task with LDA: {:.3f}\".format(lda_pre))\n return [svm_pre, knn_pre, logreg_pre, gnb_pre, rf_pre, lda_pre]\n # return", "def predict():\n unet = get_unet()\n mean_train, std_train, history = train_unet(unet)\n\n input_size = unet.get_layer(\"input_layer\").input_shape[0][1]\n output_size = unet.get_layer(\"output_layer\").output_shape[1]\n\n test_data = images.load_test(cst.TEST_DIR, cst.TEST_SIZE, input_size, output_size, mean_train, std_train)\n\n masks = unet.predict(test_data, verbose=1)\n numpy.save(\"image_mask.npy\", masks)\n\n return masks, history", "def predict_from_cam():\n #send_mqtt_message(\"Taking a photo...\")\n my_camera = camera.Camera()\n image = Image.open(my_camera.capture_image())\n image_data = utils.transform_image(image)\n\n #send_mqtt_message(\"Start predicting...\")\n predict(image_data)", "def predict(self, states, actions):\n \"\"\" YOUR CODE HERE \"\"\"\n states = states.reshape((-1, states.shape[-1]))\n actions = actions.reshape((-1, actions.shape[-1]))\n return self.sess.run(self.pred_next_obs, feed_dict={self.ob_ph:states, self.ac_ph:actions}).reshape(states.shape)", "def test_auto_predict_unit_timing_predict(self) -> None:\n my_module = torch.nn.Linear(2, 2)\n\n input_dim = 2\n dataset_len = 8\n batch_size = 2\n\n predict_dl = generate_random_iterable_dataloader(\n dataset_len, input_dim, batch_size\n )\n predict(\n TimingAutoPredictUnit(module=my_module),\n predict_dl,\n max_steps_per_epoch=1,\n timer=Timer(),\n )", "def predict_success():\n controller, aircraft, task, uid, success= read_data()\n unique_tasks = sorted(list(set(task)))\n scores = list()\n # model = SVC()\n model = Model()\n for t in unique_tasks:\n idx = [u == t for u in task]\n controller_ = [c for c, i in zip(controller, idx) if i]\n success_ = [s for s, i in zip(success, idx) if i]\n\n # skip task if all subjects succeed or fail because only one class is represented\n success_rate = np.sum(success_) / len(success_)\n if success_rate < 1e-8 or success_rate > 1 - 1e-8:\n continue\n\n controller_ = [c[:c.shape[0] // 2] for c in controller_] #get half controllers\n scores.append(_predict_success(controller_, success_, model.logreg))\n str = \"Prediction accurate on task %d is %.3f\" % (t, scores[t-1])\n 
print(str)\n print(\"Predict success: {:.3f}\".format(np.mean(scores)))\n return", "def predict(self, X, task_id):\n return self.pl.predict(X, task_id)", "def predict(model, X_test):", "def predict(img, ibl_img):\r\n img_trnsf = transforms.Compose([\r\n Mask_Transform(),\r\n ToTensor()\r\n ])\r\n ibl_trnsf = transforms.Compose([\r\n # IBL_Transform(),\r\n ToTensor()\r\n ])\r\n\r\n ibl_tensor = ibl_trnsf(ibl_img)\r\n c,h,w =ibl_tensor.size()\r\n ibl_tensor = ibl_tensor.view(1, c, h, w)\r\n # print('ibl: ', ibl_tensor.size())\r\n\r\n img_tensor = img_trnsf(img)\r\n c,h,w = img_tensor.size()\r\n img_tensor = img_tensor.view(1, c, h, w)\r\n model.eval()\r\n with torch.no_grad():\r\n I_s = img_tensor.to(device)\r\n L_t = ibl_tensor.to(device)\r\n predicted_img = model(I_s, L_t)\r\n predicted_img = predicted_img[0].detach().cpu().numpy()\r\n predicted_img = predicted_img[0].transpose((1,2,0))\r\n return predicted_img", "def predict_autophase_onset(predictions):\n #scan through predictions\n phase_pred=[]\n for i in range(len(predictions)):\n for j in range(len(np.shape(predictions))-1):\n #return phase onsets where there is a \n phase_pred[i,j] = np.argmax(predictions[i,:,j]) \n return phase_pred", "def predict_survival(self, t, is_lagged = False):\r\n return self.model.predict_survival(t, is_lagged)", "def predict():\n if model:\n\n try:\n incoming_data = request.get_json()\n client_ip = request.environ['REMOTE_ADDR']\n # Keep only the variables contribution to model prediction\n repeat_contact = {key: [value] for key, value in incoming_data.items() if key.lower() not in config.NOT_TO_READ}\n \n with counter.get_lock():\n counter.value += 1\n out = counter.value\n predictions = predict_repeat_contact(repeat_contact, model, features_transform_pipe)\n app.logger.info(f\"The prediction has been served for request id {counter} with client ip {client_ip}\")\n \n # we can store the incoming_data and final predictions in the database \n\n return jsonify(predictions)\n except:\n return jsonify({'trace': traceback.format_exc()})\n else:\n return (\"No model loaded\")", "def predict(self, state, head):\n return self.models[head].predict(state)", "def one_time_prediction(predicted_vector_dim, mid_constraint_dict, flux_value_dict):\n\n predicted_vector = np.zeros(predicted_vector_dim)\n total_flux_value = 0\n for flux_name, mid_vector in mid_constraint_dict.items():\n if flux_name == target_label:\n continue\n else:\n flux_value = flux_value_dict[flux_name]\n total_flux_value += flux_value\n predicted_vector += flux_value * mid_vector\n predicted_vector /= total_flux_value\n return predicted_vector", "def predict(self, target_period=None, dataset_begin=None, dataset_end=None, debug=False):\n raise NotImplementedError('Not Implemented')", "def predict_plot_future(self,times):\r\n\r\n fut = self.x_val[-1:] # Take last window\r\n Y = []\r\n for i in range (0, times):\r\n cur_window = fut[i]\r\n #print(cur_window)\r\n y = self.model.predict(np.expand_dims(cur_window, axis = 0))\r\n #print(y[0])\r\n Y.append(y[0])\r\n #print(y)\r\n new_window = np.append(cur_window[ 1: ,] , y, axis = 0)\r\n #print(new_window.shape)\r\n fut = np.append(fut, np.expand_dims(new_window, axis = 0), axis = 0)\r\n #print(fut.shape)\r\n\r\n \r\n #print(np.array(Y).shape)\r\n #print(np.array(Y)[: , -1])\r\n self.scaler.fit(self.test_set[: , -1].reshape(-1,1))\r\n new_fut_y = self.scaler.inverse_transform(np.array(Y)[: ,-1].reshape(-1,1)).reshape(1,-1)\r\n #print(new_fut_y, new_fut_y.shape)\r\n print(new_fut_y[0])\r\n 
#plt.plot(np.array(Y)[: , -1])\r\n plt.plot(new_fut_y[0], color = 'green',marker = 'o', markerfacecolor='blue', markersize=9)\r\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does a t-test between the scores calculated for each survey rating. Also plots a histogram of each rating to check the distribution of the scores.
def scores_vs_rating():
    rating_comparison = {
        1: [],
        2: [],
        3: [],
        4: [],
        5: []
    }
    rating_key = "like_rating_specific"
    for user, session in Session.get_users_with_surveys():
        boundary = HistogramBoundary(user)
        survey = user.get_survey()
        for playlist_index, playlist in enumerate(session.recommendations):
            survey_ratings = survey[f"playlist{playlist_index+1}"][rating_key]
            for track_index, track in enumerate(playlist["tracks"]):
                track_rating, _ = boundary.get_boundary_score(track)
                survey_rating = int(survey_ratings[f'Song{track_index + 1}'])
                rating_comparison[survey_rating].append(track_rating)

    result_string = ""
    for rating_bin, scores in rating_comparison.items():
        result_string += f"{rating_bin}: {statistics.mean(scores):.3f}, "
    result_string = result_string[:-2]
    print(result_string)

    for rating_bin, scores in rating_comparison.items():
        plt.hist(scores, bins=20)
        plt.title(f"Rating: {rating_bin} (total: {len(scores)})")
        plt.xlim((0.0, 8.0))
        plt.show()

    t_tests = {}
    for i in range(1, 6):
        t_tests[i] = {}
        for j in range(1, 6):
            if i != j:
                t_test_score = ttest_ind(
                    rating_comparison[i],  # [:min_amount],
                    rating_comparison[j],  # [:min_amount],
                    equal_var=False
                )
                t_tests[i][j] = t_test_score[1]

    pprint(t_tests)
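For context, a hedged sketch of how the pairwise p-value matrix built above could be read. It assumes the t_tests dict (rating pair to Welch t-test p-value) is made available, for example by returning it instead of only pretty-printing it:

# Illustrative only: flag rating pairs whose score distributions differ at alpha = 0.05.
alpha = 0.05
for i, row in t_tests.items():
    for j, p_value in row.items():
        if i < j and p_value < alpha:
            print(f"ratings {i} vs {j} differ significantly (p={p_value:.4f})")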
[ "def overall_score_eda(df, figure_path):\n # values counts for each review score\n print('\\nValue counts for each review score (1-5)')\n print(df['score'].value_counts())\n # visualize review scores distribution\n fig, ax = plt.subplots()\n df['score'].value_counts().plot(ax=ax, kind='bar')\n total = float(len(df['score']))\n for p in ax.patches:\n percentage = '{:.1f}%'.format(100 * p.get_height() / total)\n x = p.get_x() + p.get_width() - 0.45\n y = p.get_height()\n ax.annotate(percentage, (x, y))\n plt.title('Distribution of Review Scores')\n plt.xlabel('Review Score')\n plt.ylabel('Number of Reviews')\n plt.savefig(figure_path)", "def analyze_and_plot_data(ratings):\n \n num_users = ratings['user_id'].nunique()\n num_items = ratings['movie_id'].nunique()\n print(\"Number of unique users is \" + str(num_users))\n print(\"Number of unique movies is \" + str(num_items))\n print(\"The number of ratings in the dataset set is \" + str(ratings.shape[0]))\n\n #Determine ratings distribution and plot results\n count = ratings['rating'].value_counts()\n count = count.to_frame('count')\n count.index.name = 'Rating'\n count = count.sort_values(by='Rating', ascending=1)\n count.plot(kind='bar')\n plt.ylabel('Number of ratings')\n plt.title('Distribution of Ratings')\n plt.savefig('ratings_distribution.png')\n\n #Pie plot\n count.plot(kind='pie', subplots=True, figsize=(5, 5), autopct='%1.0f%%')\n plt.title('Distribution of Ratings')\n plt.savefig('ratings_distribution_pie.png')\n plt.show()\n\n #Determine number of ratings per movie and plot data \n count_movies_rated = ratings['movie_id'].value_counts()\n buckets = [250, 150, 50, 25, 5, 1]\n ratings_dist = np.zeros(6)\n prior_count = 0\n for i in range(6):\n ratings_dist[i] = count_movies_rated[count_movies_rated >= buckets[i]].count()\n ratings_dist[i] -= prior_count\n prior_count += ratings_dist[i]\n\n plt.title('Ratings per Movie')\n plt.xlabel('Number of ratings')\n plt.ylabel('Number of movies')\n label = ['>250','150-250', '50-150','50-25', '25-5', '1-5']\n index = np.arange(len(label))\n plt.bar(index, ratings_dist)\n plt.xticks(index, label)\n plt.savefig('movies_distribution.png')\n\n plt.show()\n\n #Determine how the number of ratings per user and plot data\n count_users = ratings['user_id'].value_counts()\n buckets = [250, 150, 50, 25, 5, 1]\n users_dist = np.zeros(6)\n prior_count = 0\n for i in range(6):\n users_dist[i] = count_users[count_users >= buckets[i]].count()\n users_dist[i] -= prior_count\n prior_count += users_dist[i]\n\n plt.title('Ratings per User')\n plt.xlabel('Number of ratings')\n plt.ylabel('Number of users')\n plt.bar(index, users_dist)\n plt.xticks(index, label)\n plt.savefig('users_distribution.png')\n\n plt.show()", "def plot_results(years, ratings):\n\n # Defines the figure with two subplots and a fixed size in inches\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize = (12, 5))\n\n # Leftern subplot with ratings from 0 to 10\n ax = axs[0]\n plot_subplot(years, rating, ax, 0, 10)\n\n # Rightern subplot with ratings from 8 to 9\n ax = axs[1]\n plot_subplot(years, rating, ax, 8, 9)\n\n fig.suptitle(\"The average rating per year of the top 50 highest-rated movies from 2008 to 2017\")", "def test_score_t(self) -> None:\n self._test_score(\n score=self.instance.score_t, columns=slice(0, 2), shape=(self.batch_size, self.instance.num_entities)\n )", "def plot(self):\r\n tpr, fpr, thresholds = self.__calc_tpr_fpr()\r\n self.results = np.column_stack((tpr, fpr, thresholds))\r\n\r\n # %%% TODO START YOUR CODE HERE 
%%%\r\n\r\n fig = plt.figure()\r\n plt.plot(fpr, tpr)\r\n fig.suptitle('ROC Plot')\r\n plt.xlabel('True Negative Rate')\r\n plt.ylabel('True Positive Rate')\r\n\r\n # %%% END YOUR CODE HERE %%%\r", "def plot_scores(scores):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()", "def _get_test_scores():\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT testset_id, score\n FROM (\n SELECT test_option.testset_id, AVG(mco.is_correct) AS score, \n COUNT(*) as n_responses\n FROM (\n SELECT tsr.testset_id, mcr.option_id\n FROM drill_testset_responses AS tsr\n INNER JOIN drill_multiplechoiceresponse AS mcr\n ON tsr.multiplechoiceresponse_id = mcr.response_ptr_id\n ) AS test_option\n INNER JOIN drill_multiplechoiceoption AS mco\n ON test_option.option_id = mco.id\n GROUP BY test_option.testset_id\n ) AS results\n WHERE n_responses > 0\n \"\"\")\n return [(i, float(s)) for (i, s) in cursor.fetchall()]", "def plot_score_distribution(\n target_scores,\n non_target_scores,\n experiment_name,\n hist_bins=HIST_BINS,\n filename=None,\n):\n\n fig, ax = pyplot.subplots()\n ax2 = ax.twinx()\n\n ax2.hist(target_scores, bins=hist_bins, density=True, color=\"blue\",\n alpha=0.5)\n ax.hist(non_target_scores, bins=hist_bins, density=True, color=\"red\",\n alpha=0.5)\n\n ax.set_xlabel(\"Score\", )\n ax2.set_ylabel(\"Target normed count\", color=\"b\")\n ax.set_ylabel(\"Non-target normed count\", color=\"r\")\n ax.set_title(experiment_name)\n fig.tight_layout()\n\n if filename is not None:\n fig.savefig(filename)\n\n return fig", "def averagesclass():\n\tlistofresults=[]\n\tfor i in range(0,user_number):\n\t\tcurrent=list(map(function, data[i][\"responses\"]))\n\t\ta=len(current)\n\t\tfor i in range(0,numquestions-a):\n\t\t\tcurrent.append(0)\n\n\t\tlistofresults.append(current)\n\n\tresults= map(sum, zip(*listofresults))\n\tx_values=[]\n\tfor i in range(1,numquestions+1):\n\t\tx_values.append(i)\n\n\tplt.scatter(x_values, results)\n\tplt.plot(x_values, results)\n\n\tplt.ylim(0,user_number)\n\tplt.xlim(0.5,numquestions+0.5)\n\n\tplt.xticks(list(range(1,numquestions+1)))\n\tplt.xlabel(\"Question Number\")\n\tplt.ylabel(\"Number of Students who got the question Correct\")\n\n\tplt.savefig(\"analytics/class_average.png\")\n\tplt.close()", "def _ttest(self, res1_path, res2_path):\n fn1 = res1_path.split('/')[-1]\n fn2 = res2_path.split('/')[-1]\n print(\"H0: x({}) <= x({})\".format(fn1, fn2))\n acc_diff, fscore_diff, l_diff = self._get_diffs_mode1(res1_path, res2_path)\n\n t_stat, p_val = ttest_1samp(acc_diff, 0)\n rejection = (p_val / 2 < self.alpha) and (t_stat > 0)\n print(' Accuracies:')\n print(' Rejection: ', rejection)\n if rejection:\n print(' P-value: ', p_val)\n\n t_stat, p_val = ttest_1samp(fscore_diff, 0)\n rejection = (p_val / 2 < self.alpha) and (t_stat > 0)\n print(' F1-scores:')\n print(' Rejection: ', rejection)\n if rejection:\n print(' P-value: ', p_val)\n\n t_stat, p_val = ttest_1samp(l_diff, 0)\n rejection = (p_val / 2 < self.alpha) and (t_stat > 0)\n print(' Losses:')\n print(' Rejection: ', rejection)\n if rejection:\n print(' P-value: ', p_val)\n\n acc_diff, fscore_diff = self._get_diffs_mode2(res1_path, res2_path)\n\n t_stat, p_val = ttest_1samp(acc_diff, 0)\n rejection = (p_val / 2 < self.alpha) and (t_stat > 0)\n print(' Accuracies (SW):')\n print(' Rejection: ', rejection)\n if rejection:\n print(' P-value: ', p_val)\n\n t_stat, p_val = ttest_1samp(fscore_diff, 0)\n rejection = 
(p_val / 2 < self.alpha) and (t_stat > 0)\n print(' F1-scores (SW):')\n print(' Rejection: ', rejection)\n if rejection:\n print(' P-value: ', p_val)", "def test_score_hrt(self) -> None:\n self._test_score(score=self.instance.score_hrt, columns=slice(None), shape=(self.batch_size, 1))", "def plot_score_distribution(self):\n plt.figure(figsize=(10, 7))\n sns.distplot(self.corresponding_logits, kde=False, hist=True, label='Prediction logit score distribution')\n\n plt.show()", "def score(self, hypothesis, outputs):\n top = 0\n bot = 0\n for did, det in enumerate(hypothesis.tracklet):\n if det is not None:\n bot += 1\n if outputs[\"labels\"][did] > 0.5:\n bot += 1\n if det is not None and outputs[\"labels\"][did] > 0.5:\n top += outputs[\"ious\"][did]\n return 2 * top * 1. / bot", "def test_score_t_with_score_hrt_equality(self) -> None:\n self._test_score_equality(columns=slice(2), name=\"score_t\")", "def plot_score(ax, series, labels, colors, ylabel):\n ax.set_ylabel(\"Percentile of score ({})\".format(ylabel))\n ax.set_xlabel(\"Time elapsed, sec\")\n for s, label, color in zip(series, labels, colors):\n scoref = lambda x: stats.percentileofscore(x, s.quantile(0.9))\n rolling_score = pd.rolling_apply(s, min(len(s) / 15, 40), scoref)\n ax.plot(s.index, rolling_score, label=label, color=color)\n plt.ylim(ymin=0, ymax=105)", "def test_Misc_scores(self):\n mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa\n predictions_mock = mock_data.drop(columns=['actual', 'predictions'])\n predictions_mock.columns = [1, 0]\n ######################################################################################################\n score = SensitivityScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))\n assert isinstance(score, UtilityFunctionMixin)\n assert isinstance(score, ScoreBase)\n accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)\n assert isclose(accuracy, recall_score(y_true=mock_data.actual, y_pred=mock_data.predictions))\n assert isclose(score.value, recall_score(y_true=mock_data.actual, y_pred=mock_data.predictions))\n ######################################################################################################\n score = SpecificityScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))\n assert isinstance(score, UtilityFunctionMixin)\n assert isinstance(score, ScoreBase)\n accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)\n assert isclose(accuracy, 0.8183962264150944)\n assert isclose(score.value, 0.8183962264150944)\n ######################################################################################################\n score = PositivePredictiveValueScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5)) # noqa\n assert isinstance(score, UtilityFunctionMixin)\n assert isinstance(score, ScoreBase)\n accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)\n assert isclose(accuracy, 0.6607929515418502)\n assert isclose(score.value, 0.6607929515418502)\n ######################################################################################################\n score = NegativePredictiveValueScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5)) # noqa\n assert isinstance(score, UtilityFunctionMixin)\n assert isinstance(score, ScoreBase)\n accuracy = 
score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)\n assert isclose(accuracy, 0.7125256673511293)\n assert isclose(score.value, 0.7125256673511293)\n ######################################################################################################\n score = AccuracyScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))\n assert isinstance(score, UtilityFunctionMixin)\n assert isinstance(score, ScoreBase)\n accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)\n assert isclose(accuracy, accuracy_score(y_true=mock_data.actual, y_pred=mock_data.predictions))\n assert isclose(score.value, accuracy_score(y_true=mock_data.actual, y_pred=mock_data.predictions))", "def plot_performance_over_thresholds(relevant_results):\n\n plot_data = relevant_results[relevant_results['threshold'] != 'peak']\n plot_data['threshold'] = plot_data['threshold'].apply(int)\n\n peak_data = relevant_results[relevant_results['threshold'] == 'peak']\n peak_data['threshold'] = peak_data['n'] - peak_data['peak']\n\n fig, ax_mantel_scores = plt.subplots(figsize=(10, 12))\n margin = 0.04\n ht = 0.28\n\n \"\"\" Top panel is Mantel correlations. \"\"\"\n ax_mantel_scores.set_position([margin, 1.0 - margin - ht, 1.0 - (2 * margin), ht])\n sns.lineplot(x=\"threshold\", y=\"best\", data=plot_data, color=\"gray\", ax=ax_mantel_scores, label=\"peak\")\n sns.lineplot(x=\"threshold\", y=\"train_score\", data=plot_data, color=\"green\", ax=ax_mantel_scores, label=\"train\")\n sns.scatterplot(x=\"threshold\", y=\"train_score\", data=peak_data, color=\"green\", ax=ax_mantel_scores)\n sns.lineplot(x=\"threshold\", y=\"test_score\", data=plot_data, color=\"red\", ax=ax_mantel_scores, label=\"test\")\n sns.scatterplot(x=\"threshold\", y=\"test_score\", data=peak_data, color=\"red\", ax=ax_mantel_scores)\n\n rect = patches.Rectangle((158, -0.3), 5.0, 1.0, facecolor='gray', fill=True, alpha=0.25)\n ax_mantel_scores.add_patch(rect)\n\n ax_mantel_scores.legend(labels=['peak', 'train', 'test'])\n plt.suptitle(\"Scores by top probe threshold\")\n ax_mantel_scores.set_ylabel('Mantel correlation')\n\n \"\"\" Middle panel is Overlap calculations. 
\"\"\"\n ax_overlaps = fig.add_axes([margin, (2 * margin) + ht, 1.0 - (2 * margin), ht],\n \"Real vs Shuffle Overlap Percentages\")\n sns.lineplot(x=\"threshold\", y=\"train_vs_test_overlap\", data=plot_data, color=\"gray\", ax=ax_overlaps,\n label=\"t-t overlap\")\n sns.scatterplot(x=\"threshold\", y=\"train_vs_test_overlap\", data=peak_data, color=\"black\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_agno\", data=plot_data, color=\"green\", ax=ax_overlaps,\n label=\"agno\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_agno\", data=peak_data, color=\"green\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_dist\", data=plot_data, color=\"red\", ax=ax_overlaps,\n label=\"dist\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_dist\", data=peak_data, color=\"red\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_edge\", data=plot_data, color=\"orchid\", ax=ax_overlaps,\n label=\"edge\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_edge\", data=peak_data, color=\"orchid\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_be04\", data=plot_data, color=\"orchid\", ax=ax_overlaps,\n label=\"be04\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_be04\", data=peak_data, color=\"orchid\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_be08\", data=plot_data, color=\"orchid\", ax=ax_overlaps,\n label=\"be08\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_be08\", data=peak_data, color=\"orchid\", ax=ax_overlaps)\n sns.lineplot(x=\"threshold\", y=\"overlap_real_vs_be16\", data=plot_data, color=\"orchid\", ax=ax_overlaps,\n label=\"be16\")\n sns.scatterplot(x=\"threshold\", y=\"overlap_real_vs_be16\", data=peak_data, color=\"orchid\", ax=ax_overlaps)\n v_rect = patches.Rectangle((158, 0.0), 5.0, 1.0, facecolor='gray', fill=True, alpha=0.25)\n ax_overlaps.add_patch(v_rect)\n\n \"\"\" Bottom panel is t-scores. 
\"\"\"\n ax_mantel_ts = fig.add_axes([margin, margin, 1.0 - (2 * margin), ht], \"Mantel T Scores\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_agno\", data=plot_data, color=\"green\", ax=ax_mantel_ts, label=\"agno\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_dist\", data=plot_data, color=\"red\", ax=ax_mantel_ts, label=\"dist\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_edge\", data=plot_data, color=\"orchid\", ax=ax_mantel_ts, label=\"edge\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_be04\", data=plot_data, color=\"orchid\", ax=ax_mantel_ts, label=\"be04\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_be08\", data=plot_data, color=\"orchid\", ax=ax_mantel_ts, label=\"be08\")\n sns.lineplot(x=\"threshold\", y=\"t_mantel_be16\", data=plot_data, color=\"orchid\", ax=ax_mantel_ts, label=\"be16\")\n\n v_rect = patches.Rectangle((158, -100), 5.0, 200.0, facecolor='gray', fill=True, alpha=0.25)\n ax_mantel_ts.add_patch(v_rect)\n h_rect = patches.Rectangle((0, -2), 1024.0, 2.0, facecolor='gray', fill=True, alpha=0.25)\n ax_mantel_ts.add_patch(h_rect)\n\n ax_mantel_ts.legend(labels=['agno', 'dist', 'edge', 'be04', 'be08', 'be16', ])\n ax_mantel_ts.set_ylabel('T score')\n\n return fig, (ax_mantel_scores, ax_mantel_scores, ax_mantel_ts)", "def t_stat_weighted(avg_data,se_data):\n\n t_score = avg_data/se_data\n\n return t_score", "def testBettiGenusROC(Nsize,power_null,power_test,average,num_iter):\n diagnol = np.arange(0,1.1,0.1)\n\n\n [Betti_null,Betti_test,Genus_null,Genus_test,thresholds] = utilities.Generate_BettiGenus_array(Nsize,power_null,power_test,average,num_iter) \n\n [PFA_betti0,PD_betti0] = rocGen.BettiROC(Betti_null[:,0,:],Betti_test[:,0,:],power_null,power_test) \n [PFA_betti1,PD_betti1] = rocGen.BettiROC(Betti_null[:,1,:],Betti_test[:,1,:],power_null,power_test) \n [PFA_Genus,PD_Genus] = rocGen.GenusROC(Genus_null,Genus_test,power_null,power_test)\n\n fig1 = plt.figure()\n ax11 = fig1.add_subplot(311)\n ax12 = fig1.add_subplot(312)\n ax13 = fig1.add_subplot(313)\n ax11.plot(thresholds,Betti_null[5,0,:],label= 'power index = {null}'.format(null=power_null))\n ax11.plot(thresholds,Betti_test[5,0,:],label ='power index = {test}'.format(test=power_test))\n ax12.plot(thresholds,Betti_null[5,1,:],label= 'power index = {null}'.format(null=power_null))\n ax12.plot(thresholds,Betti_test[5,1,:],label ='power index = {test}'.format(test=power_test))\n ax13.plot(thresholds,Genus_null[5,:],label= 'power index = {null}'.format(null=power_null))\n ax13.plot(thresholds,Genus_test[5,:],label ='power index = {test}'.format(test=power_test))\n ax11.title.set_text('Betti0 ROC')\n ax12.title.set_text('Betti1 ROC')\n ax13.title.set_text('Genus ROC')\n ax11.legend()\n ax12.legend()\n ax13.legend()\n fig1.tight_layout()\n\n fig2 = plt.figure()\n ax21 = fig2.add_subplot(131)\n ax22 = fig2.add_subplot(132)\n ax23 = fig2.add_subplot(133)\n ax21.plot(PFA_betti0,PD_betti0,label= 'Null = {null}, Test = {test}'.format(null=power_null,test=power_test))\n ax21.plot(diagnol,diagnol,label='x = y')\n ax22.plot(PFA_betti1,PD_betti1,label= 'Null = {null}, Test = {test}'.format(null=power_null,test=power_test))\n ax22.plot(diagnol,diagnol,label='x = y')\n ax23.plot(PFA_Genus,PD_Genus,label= 'Null = {null}, Test = {test}'.format(null=power_null,test=power_test))\n ax23.plot(diagnol,diagnol,label='x = y')\n\n ax21.title.set_text('Betti0 ROC')\n ax22.title.set_text('Betti1 ROC')\n ax23.title.set_text('Genus ROC')\n fig2.legend()\n ax21.legend()\n ax22.legend()\n ax23.legend()\n fig2.tight_layout()\n print('. . . 
Finished the test Betti Genus ROC ')\n\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the power coefficient for a given tip speed ratio
def calculate_C_p(tip_speed_ratio): a_min = get_induction_factor(0.0) a_max = get_induction_factor(tip_speed_ratio) # Calculate integral integral = lambda a: ((1 - a) * (1 - 2 * a) * (1 - 4 * a) / (1 - 3 * a)) ** 2 a = np.linspace(a_min, a_max, 100000) da = a[1] - a[0] dCp = integral(a) * da Cp = np.sum(dCp) * 24.0 / tip_speed_ratio ** 2 return Cp
[ "def calcPower(speed, resistance_level):\r\n satoridata = [\r\n {\r\n 'level': 1,\r\n 'slope': 3.73,\r\n 'intercept': -28.67\r\n },\r\n {\r\n 'level': 2,\r\n 'slope': 5.33,\r\n 'intercept': -36.67\r\n },\r\n {\r\n 'level': 3,\r\n 'slope': 6.87,\r\n 'intercept': -43.33\r\n },\r\n {\r\n 'level': 4,\r\n 'slope': 8.27,\r\n 'intercept': -47.33\r\n },\r\n {\r\n 'level': 5,\r\n 'slope': 10.07,\r\n 'intercept': -66.33\r\n },\r\n {\r\n 'level': 6,\r\n 'slope': 11.4,\r\n 'intercept': -67.00\r\n },\r\n {\r\n 'level': 7,\r\n 'slope': 13.13,\r\n 'intercept': -83.67\r\n },\r\n {\r\n 'level': 8,\r\n 'slope': 14.4,\r\n 'intercept': -82.00\r\n },\r\n {\r\n 'level': 9,\r\n 'slope': 15.93,\r\n 'intercept': -89.67\r\n },\r\n {\r\n 'level': 10,\r\n 'slope': 17.73,\r\n 'intercept': -114.67\r\n }\r\n ]\r\n\r\n power = satoridata[resistance_level-1]['slope'] * speed + satoridata[resistance_level-1]['intercept']\r\n print(resistance_level, power)\r\n return max((0, round(power)))", "def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')", "def airspeedMultiplier(s, obj):\n\n speed = WUps2kts(obj.V.norm())\n return 2.25 / (1 + exp(-0.024 * (speed - 212)))", "def _compute_tcl_power(self):\n return sum([tcl.u*tcl.P for tcl in self.tcls])", "def productivityMultiplier(self) -> float:\n return self._getMultiplier('productivity')", "def accelerate(speed=0.0, coeff=5, factor=10):\n if speed < 10.0:\n divisor = 10.0\n else:\n divisor = speed\n newSpeed = speed + factor * coeff / divisor\n if newSpeed < 0.0:\n newSpeed = 0.0\n return newSpeed", "def cost_multiplier(self):\n return 1.0", "def calc_power(field):\r\n\r\n\tpoynt_in_points = numpy.real(field.p * numpy.conj(field.vn))\r\n\tpower = numpy.sum(poynt_in_points)\r\n\tpower *= field.one_pixel_area\r\n\treturn power", "def get_pow_pow(challenge, difficulty):\n return calculate(challenge, difficulty)", "def get_power(self, t: Time):\n t = t.as_decimal_hour\n return self.P15_ip.solve(t) # unit: kW", "def getWheelSpeed(diameter,rps):\n circ=math.pi*diameter\n return rps*circ", "def to_multiplier(difficulty):\n return float((1 << 64) - int(work_difficulty, 16)) / float(\n (1 << 64) - int(difficulty, 16)\n )", "def calc_thrust_power(block_count):\n return block_count / 0.03", "def effectivePollutionMultiplier(self) -> float:\n return self.pollutionMultiplier() * self.energyMultiplier()", "def rule_power_factor(f11, f10, f01, f00):\n N = f11 + f10 + f01 + f00\n zero = 1e-10\n supp_ab = f11 / N\n supp_a = f10 / N\n return (supp_ab * supp_ab) / (supp_a + zero)", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def get_power(self, wsi, level: int) -> float:\n raise ValueError(\n \"Currently, TiffFile does not provide a general API to obtain objective power.\"\n \"Please use `level` (or `mpp`) instead, or try other backends.\"\n )", "def get_power(self, wsi, level: int) -> float:\n objective_power = wsi.properties.get(\"openslide.objective-power\")\n if objective_power:\n downsample_ratio = self.get_downsample_ratio(wsi, level)\n return float(objective_power) / downsample_ratio\n\n raise ValueError(\"Objective `power` cannot be obtained for this file. Please use `level` (or `mpp`) instead.\")", "def _calculate_power(self):\n if self.alpha is None:\n alpha = 0.05\n else:\n alpha = self.alpha\n\n self.power = self._power_internals(self.n, alpha)\n self.beta = 1 - self.power" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the input vector is uniform, then collapse it to a single value, otherwise raise a warning.
def collapse(vec, name=None, exclude=[]): if name is None: name = '**unkown**' if isuniform(vec): return vec[0] elif isuniform(vec, exclude=exclude): return list(set(np.unique(vec)) - set(exclude))[0] else: warnings.warn("The variable {} is expected to be uniform," " but it is not.".format(name)) return vec
[ "def uniform(self):\n a = np.random.uniform()\n while a == 1.0: a = np.random.uniform()\n return a", "def uniform_solution():\n return BridgeFactory.solution_from_indices(lambda _1, _2: uniform(0, 1))", "def lecun_uniform(seed=None):\n return VarianceScaling(\n scale=1., mode='fan_in', distribution='uniform', seed=seed)", "def _normalize(\n vec,\n):\n\n return vec / math.sqrt(sum(vec ** 2))", "def two_d_uniform_density(vector):\n #prototype of a density function. This is how measures are specified.\n x = vector[0]\n y = vector[1]\n if (0<=x) and (x<=1) and (0<=y) and (y<=1):\n return 1.0\n else:\n return 0.0", "def unit_vector(v):\n return v / la.norm(v)", "def validate_vector(u, flatten=True):\n\n # convert it to ndarray\n u = np.asarray(u)\n\n # flatten it if flatten is True.\n if flatten:\n u = u.flatten()\n return u", "def redistribute(v: np.ndarray, f=1):\n with np.errstate(divide='ignore'):\n return 1 / (1 + (1 / v - 1)) ** (1 / f)", "def test_from_sample_single_value(self):\n\n zero_vec = np.zeros(10)\n\n # Default density estimator can't handle situation with single unique\n # sample value (gives `LinAlgError: singular matrix`).\n\n # Case when sample width is zero but density is not zero\n density_centered_interval = make_circ_density([(-1, 1)])\n with config.context({\"estimator_cont\": lambda x: density_centered_interval}):\n assert from_sample_cdf_max_error(zero_vec) <= 1e-4\n\n # Case when both sample width and density are zero\n density_shifted_interval = make_circ_density([(10, 20)])\n with config.context({\"estimator_cont\": lambda x: density_shifted_interval}):\n # Here currently the problem is that support is estimated way to\n # wide with very small (~1e-9) non-zero density outside of [10,\n # 20]. However, CDFs are still close.\n assert from_sample_cdf_max_error(zero_vec) <= 2e-4", "def SquareValue(v):\r\n return v * v", "def _ve_gauss_ ( s , accept = lambda a : True , nmax = 1000 ) :\n #\n if 0 >= s.cov2() or iszero ( s.cov2 () ) : return s.value() ## return\n #\n v = s.value ()\n e = s.error ()\n #\n for i in range ( nmax ) :\n r = _gauss ( v , e ) \n if accept ( r ) : return r\n \n logger.warning(\"Can'n generate proper random number %s\" % s )\n return v", "def normalise(vect):\n return vect / np.sum(vect)", "def _random_uniform(self,nominal_values,threshold):\n nr = len(nominal_values)\n return nominal_values*((self.np_random.rand(nr)*2 - 1)*threshold + 1)", "def unit_vector(vector):\n return vector/mag(vector)", "def _check_for_constant_features_in_samplets(data):\n\n for samplet, features in data.items():\n uniq_values = np.unique(features)\n # when there is only one unique value, among n features\n if uniq_values.size < 2:\n raise ConstantValuesException(\n 'Constant values ({}) detected for {} '\n '- double check the process, '\n 'or disable this check!'.format(uniq_values, samplet))", "def at_least_one_negative(lst):\n if not any(item < 0 for item in lst):\n lst[random.randint(0, len(lst) - 1)] *= -1\n return lst", "def norm(vector):\r\n\treturn math.sqrt(default_scalar_prod(vector, vector))", "def random_unit_vector():\n return (np_to_euc_mv(np.random.randn(3))).normal()", "def bi2uni(v):\n if -5 <= v <= 5:\n return (v + 5) / 2.0\n raise ValueError('clip!')", "def he_uniform(seed=None):\n return VarianceScaling(\n scale=2., mode='fan_in', distribution='uniform', seed=seed)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the configuration information (e.g., number of pings, number of beams, struct types, etc.) from the index data. Returns =======
def calc_config(index): ids = np.unique(index['ID']) config = {} for id in ids: if id not in [21, 24, 26]: continue inds = index['ID'] == id _config = index['config'][inds] _beams_cy = index['beams_cy'][inds] # Check that these variables are consistent if not isuniform(_config): raise Exception("config are not identical for id: 0x{:X}." .format(id)) if not isuniform(_beams_cy): raise Exception("beams_cy are not identical for id: 0x{:X}." .format(id)) # Now that we've confirmed they are the same: config[id] = headconfig_int2dict(_config[0]) config[id].update(beams_cy_int2dict(_beams_cy[0], id)) config[id]['_config'] = _config[0] config[id]['_beams_cy'] = _beams_cy[0] config[id].pop('cy') return config
[ "def get_simple_info_for_index(self, index=None, params={}, **kwargs):\n raw = self.client.cat.indices(index, params=params, **kwargs).split('\\n')\n list = []\n for r in raw:\n alter = r.split(' ')\n if len(alter) < 10: continue\n dict = {\n 'health': alter[0],\n 'status': alter[1],\n 'index': alter[2],\n }\n if len(alter) == 11:\n # May appear split fail (alter[3] is a empty string)\n dict['uuid'] = alter[4]\n i = 5\n else:\n dict['uuid'] = alter[3]\n i = 4\n dict['pri'] = alter[i]\n i += 1\n dict['rep'] = alter[i]\n i += 1\n dict['docs_count'] = alter[i]\n i += 1\n dict['docs_deleted'] = alter[i]\n i += 1\n dict['store_size'] = alter[i]\n i += 1\n dict['pri_store_size'] = alter[i]\n list.append(dict)\n logger.info('Acquire simple information of the index is done succeeded: %s' % len(list))\n return list", "def index_info(self, remote=False):\n # http://localhost:7080/index_manager\n\n async def _enhance(conf):\n conf = copy.deepcopy(conf)\n\n for name, env in self.register.items():\n async with AsyncElasticsearch(**env[\"args\"]) as client:\n try:\n indices = await client.indices.get(\"*\")\n except elasticsearch.exceptions.ConnectionError:\n ... # keep the hard-coded place-holders info\n else: # replace the index key with remote info\n conf[\"env\"][name][\"index\"] = [\n {\n \"index\": k,\n \"aliases\": list(v[\"aliases\"].keys()),\n \"doc_type\": v[\"mappings\"][\"_meta\"][\"biothing_type\"],\n }\n for k, v in indices.items()\n ]\n return conf\n\n if remote:\n job = asyncio.ensure_future(_enhance(self._config))\n job.add_done_callback(self.logger.debug)\n return job\n\n return self._config", "def get_settings(self):\n index_settings = {}\n for path in glob.iglob(self.data_path + '/index/*.json'):\n logger.debug('Reading index setup from {}'.format(path))\n setup = None\n with open(path) as f:\n setup = json.load(f)\n index_name = setup['index']\n index_setup = setup['setup']\n index_settings[index_name] = index_setup\n return index_settings", "def index_config(self) -> Optional[pulumi.Input['FieldIndexConfigArgs']]:\n return pulumi.get(self, \"index_config\")", "def __get_index_info__(self, index_name):\n result = self.__description__.get_indices()\n if result is not None:\n result = result.get(index_name, None)\n return result", "def get_config(self) -> dict:\n info = super().get_config()\n info['x_names'] = self.x_names\n info['model_name'] = self.amici_model.getName()\n info['solver'] = str(type(self.amici_solver))\n info['sensi_order'] = self.max_sensi_order\n\n return info", "def collect_state(self, configurations):\n num_ex_gw_ports = 0\n num_interfaces = 0\n num_floating_ips = 0\n router_infos = self.router_info.values()\n num_routers = len(router_infos)\n num_hd_routers = collections.defaultdict(int)\n for ri in router_infos:\n ex_gw_port = ri.router.get('gw_port')\n if ex_gw_port:\n num_ex_gw_ports += 1\n num_interfaces += len(ri.router.get(\n l3_constants.INTERFACE_KEY, []))\n num_floating_ips += len(ri.router.get(\n l3_constants.FLOATINGIP_KEY, []))\n hd = ri.router['hosting_device']\n if hd:\n num_hd_routers[hd['id']] += 1\n routers_per_hd = dict((hd_id, {'routers': num})\n for hd_id, num in num_hd_routers.items())\n non_responding = self._dev_status.get_backlogged_hosting_devices()\n configurations['total routers'] = num_routers\n configurations['total ex_gw_ports'] = num_ex_gw_ports\n configurations['total interfaces'] = num_interfaces\n configurations['total floating_ips'] = num_floating_ips\n configurations['hosting_devices'] = routers_per_hd\n 
configurations['non_responding_hosting_devices'] = non_responding\n return configurations", "def calc_statistics(self):\n pass", "def index(self):\n if self.query['queryType'] == 'scan':\n if not self.query.get('columns') or '__time' in self.query['columns']:\n return ['__time']\n return []\n if self.query['queryType'] in {'groupBy', 'topN', 'timeseries'}:\n index_fields = [] if self.query['granularity'] == 'all' else ['timestamp']\n if self.query['queryType'] == 'groupBy':\n return index_fields + self.query['dimensions']\n elif self.query['queryType'] == 'topN':\n return index_fields + [self.query['dimension']]\n elif self.query['queryType'] == 'timeseries':\n return index_fields", "def get_statistics(self):\n return {\n \"as_n\": ASS_AMOUNT,\n \"links\": LINKS_AMOUNT,\n \"partitions\": [{\n \"id\": i+1,\n \"as_n\": len(t),\n \"links\": self.partitions_links[i] if i < len(self.partitions_links) else 0,\n \"internal_links\": self.partitions_internal_links[i] if i < len(self.partitions_links) else 0\n } for (i,t) in enumerate(self.partitions)]\n }", "def supplemental_index_information(cls, modulestore, structure):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def index_settings(self):\n url = self.hostname + '/settings/indexes'\n return self._get(url)", "def make_config(self):\n if not self.search_terms:\n self.make_search_terms()\n if not self.stmts:\n self.make_gene_statements()\n config = dict()\n config['name'] = self.name\n config['human_readable_name'] = self.human_readable_name\n config['search_terms'] = [st.to_json() for st in self.search_terms]\n config['assembly'] = {\n 'belief_cutoff': 0.8,\n 'filter_ungrounded': True\n }\n if self.description:\n config['description'] = self.description\n return config", "def __buildIndex(parsedData, template, info):\n #first lets read the template\n #print u'Building the index.' \n templateData = {}\n key = template.find('<nowiki>')\n lastKey = template.find('</nowiki>')\n if key == -1:\n key = template.find('<pre>')\n lastKey = template.find('</pre>')\n importantStuff = template[key+8:lastKey]\n split = re.split('<!--\\s', importantStuff)\n for item in split:\n if item.startswith('HEADER'):\n templateData['header'] = item[11:]\n elif item.startswith('ROW'):\n templateData['row'] = item[8:]\n elif item.startswith('ALT ROW'):\n templateData['altrow'] = item[12:]\n elif item.startswith('FOOTER'):\n templateData['footer'] = item[11:]\n elif item.startswith('END'):\n templateData['end'] = item[8:]\n elif item.startswith('LEAD'):\n templateData['lead'] = item[9:]\n if not templateData.has_key('altrow'):\n templateData['altrow'] = templateData['row']\n if not templateData.has_key('lead'):\n templateData['lead'] = ''\n if not templateData.has_key('end'):\n templateData['end'] = ''\n #print templateData\n #finished reading the template\n indexText = '<!-- Legobot can blank this -->'\n indexText += templateData['lead']\n reportInfo = 'Report generated based on a request from [[%s]]. 
It matches the following masks: ' % pywikibot.Page(SITE, info['talkpage']).title()\n reportInfo += ' ,'.join([m.strip() for m in info['mask']])\n reportInfo += '\\n<br />\\nIt was generated at ~~~~~ by [[User:Legobot|Legobot]].\\n'\n indexText += reportInfo\n indexText += templateData['header']\n alt = False\n for item in parsedData:\n if alt:\n rowText = templateData['altrow']\n alt = False\n else:\n rowText = templateData['row']\n alt = True\n rowText = rowText.replace('%%topic%%', item['topic'])\n rowText = rowText.replace('%%replies%%', str(item['replies']))\n rowText = rowText.replace('%%link%%', item['link'])\n rowText = rowText.replace('%%first%%', item['first'])\n rowText = rowText.replace('%%firstepoch%%', str(item['firstepoch']))\n rowText = rowText.replace('%%last%%', item['last'])\n rowText = rowText.replace('%%lastepoch%%', str(item['lastepoch']))\n rowText = rowText.replace('%%duration%%', item['duration'])\n rowText = rowText.replace('%%durationsecs%%', str(item['durationsecs']))\n indexText += rowText\n indexText += templateData['footer']\n indexText += templateData['end']\n return indexText", "def _build_index(self):\n self.kstpkper # array of time step/stress periods with data available\n self.recordarray # array of data headers\n self.iposarray # array of seek positions for each record\n self.nlay # Number of model layers\n\n # Get total file size\n self.file.seek(0, 2)\n self.totalbytes = self.file.tell()\n self.file.seek(0, 0)\n\n # Process first header\n self.header = self._get_text_header()\n header_info = self.header.read_header(self.file)[0]\n\n self.nrow = header_info['nrow']\n self.ncol = header_info['ncol']\n\n ipos = self.file.tell()\n self._store_record(header_info, ipos)\n\n # Process enough data to calculate seek distance between headers\n self._col_data_size = self._get_data_size(header_info)\n self._data_size = self._col_data_size * self.nrow\n\n # While more data in file\n while ipos + self._data_size < self.totalbytes:\n # Seek and get next header\n self.file.seek(ipos + self._data_size)\n header_info = self.header.read_header(self.file)[0]\n ipos = self.file.tell()\n self._store_record(header_info, ipos)\n\n # self.recordarray contains a recordarray of all the headers.\n self.recordarray = np.array(self.recordarray, self.header.get_dtype())\n self.iposarray = np.array(self.iposarray)\n self.nlay = np.max(self.recordarray['ilay'])\n return", "def index_settings(self):\n url = f'{self.hostname}/settings/indexes'\n return self._get(url)", "async def stats(\n self,\n *,\n index: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]] = None,\n metric: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n completion_fields: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n error_trace: t.Optional[bool] = None,\n expand_wildcards: t.Optional[\n t.Union[\n t.Union[\"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str],\n t.Union[\n t.List[\n t.Union[\n \"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str\n ]\n ],\n t.Tuple[\n t.Union[\n \"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str\n ],\n ...,\n ],\n ],\n ]\n ] = None,\n fielddata_fields: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n fields: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n filter_path: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n forbid_closed_indices: t.Optional[bool] = None,\n groups: 
t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n human: t.Optional[bool] = None,\n include_segment_file_sizes: t.Optional[bool] = None,\n include_unloaded_segments: t.Optional[bool] = None,\n level: t.Optional[\n t.Union[\"t.Literal['cluster', 'indices', 'shards']\", str]\n ] = None,\n pretty: t.Optional[bool] = None,\n ) -> ObjectApiResponse[t.Any]:\n if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH:\n __path = f\"/{_quote(index)}/_stats/{_quote(metric)}\"\n elif index not in SKIP_IN_PATH:\n __path = f\"/{_quote(index)}/_stats\"\n elif metric not in SKIP_IN_PATH:\n __path = f\"/_stats/{_quote(metric)}\"\n else:\n __path = \"/_stats\"\n __query: t.Dict[str, t.Any] = {}\n if completion_fields is not None:\n __query[\"completion_fields\"] = completion_fields\n if error_trace is not None:\n __query[\"error_trace\"] = error_trace\n if expand_wildcards is not None:\n __query[\"expand_wildcards\"] = expand_wildcards\n if fielddata_fields is not None:\n __query[\"fielddata_fields\"] = fielddata_fields\n if fields is not None:\n __query[\"fields\"] = fields\n if filter_path is not None:\n __query[\"filter_path\"] = filter_path\n if forbid_closed_indices is not None:\n __query[\"forbid_closed_indices\"] = forbid_closed_indices\n if groups is not None:\n __query[\"groups\"] = groups\n if human is not None:\n __query[\"human\"] = human\n if include_segment_file_sizes is not None:\n __query[\"include_segment_file_sizes\"] = include_segment_file_sizes\n if include_unloaded_segments is not None:\n __query[\"include_unloaded_segments\"] = include_unloaded_segments\n if level is not None:\n __query[\"level\"] = level\n if pretty is not None:\n __query[\"pretty\"] = pretty\n __headers = {\"accept\": \"application/json\"}\n return await self.perform_request( # type: ignore[return-value]\n \"GET\", __path, params=__query, headers=__headers\n )", "def indices_stats(self):\n self.logger.debug(\"start ElasticmonClient.indices_stats\")\n istats = self.es_client.indices.stats(index='_all')\n self.logger.debug(\"end ElasticmonClient.indices_stats\")\n return istats", "def configfilepopulator(self):\n # Set the number of cycles for each read and index using the number of reads specified in the sample sheet\n self.forwardlength = int(self.metadata.header.forwardlength)\n self.reverselength = int(self.metadata.header.reverselength)\n # Create a list of lists containing [cycle start, cycle end, and :runid] for each of forward reads, index 1\n # index 2, and reverse reads\n cycles = [[1, self.forwardlength, self.runid],\n [self.forwardlength + 1, self.forwardlength + 8, self.runid],\n [self.forwardlength + 9, self.forwardlength + 16, self.runid],\n [self.forwardlength + 17, self.forwardlength + 16 + self.reverselength, self.runid]]\n # A dictionary of parameters (keys) and the values to use when repopulating the config file\n parameters = {'RunFolder': self.runid, 'RunFolderDate': self.metadata.date.replace(\"-\", \"\"),\n 'RunFolderId': self.metadata.runnumber, 'RunFlowcellId': self.metadata.flowcell}\n # Load the xml file using element tree\n config = ElementTree.parse(os.path.join(self.miseqpath, self.miseqfolder, 'Data', 'Intensities', 'BaseCalls',\n 'config.xml'))\n # Get the root of the tree\n configroot = config.getroot()\n # The run node is the only child node of the root\n for run in configroot:\n # Iterate through the child nodes. 
There are three nodes sections that must be populated\n for child in run:\n # Find the cycles tag\n if child.tag == 'Cycles':\n # Set the attributes with a dictionary containing the total reads\n child.attrib = {'Last': '{}'.format(self.forwardlength + 16 + self.reverselength),\n 'Number': '{}'.format(self.totalreads), 'First': '1'}\n elif child.tag == 'RunParameters':\n # Name the child as runparameter for easier coding\n runparameters = child\n for runparameter in runparameters:\n # This replaces data in both 'ImagingReads' and 'Reads' nodes\n if 'Reads' in runparameter.tag:\n # Enumerate through the run parameters\n for indexcount, reads in enumerate(runparameter):\n # The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first\n # list in cycles\n index = int(runparameter.attrib['Index']) - 1\n # Set the text value as the appropriate value from cycles\n reads.text = str(cycles[index][indexcount])\n # Populate the instrument value\n if runparameter.tag == 'Instrument':\n runparameter.text = self.instrument\n # Iterate through the parameters in the parameter dictionary\n for parameter in parameters:\n # If the key is encountered\n if runparameter.tag == parameter:\n # Replace the text with the value\n runparameter.text = parameters[parameter]\n if 'Barcode' in runparameter.tag:\n for cycle, barcode in enumerate(runparameter):\n # Add the barcode cycles. These are the number of forward reads (+ 1 as the barcode\n # starts 1 cycle after the first run) plus the current iterator\n barcode.text = str(self.forwardlength + 1 + cycle)\n # Write the modified config file to the desired location\n config.write(os.path.join(self.miseqfolder, 'Data', 'Intensities', 'BaseCalls', 'config.xml'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get random proxy from proxypool
def get_random_proxy(): return requests.get(proxypool_url).text.strip()
[ "def get_proxy():\n conn = get_conn()\n return conn.random()", "def get_random_proxie(proxies=None):\n i_proxies = proxies or []\n if i_proxies:\n return random.choice(proxies or [])\n return None", "def choose_proxy():\n global RECENT_PROXIES\n\n proxies = get_proxies()\n\n chosen = False\n proxy = None\n max_attempts = 30\n retry_attempts = 10\n attempt = 0\n attempts_made = 0\n while not chosen:\n proxy = random.choice(proxies)\n if proxy not in RECENT_PROXIES:\n chosen = True\n RECENT_PROXIES.append(proxy)\n if len(RECENT_PROXIES) > RECENT_MAX:\n RECENT_PROXIES.pop(0)\n else:\n attempt += 1\n attempts_made += 1\n if attempts_made > max_attempts:\n break\n\n # Some chefs can take hours or days, so our proxy list may be stale.\n # Try refreshing the proxy list.\n if attempt == retry_attempts:\n attempt = 0\n proxies = get_proxies(refresh=True)\n\n return proxy", "def get_proxy(self):\n address = next(self._address_pool_cycle) # pick random address\n proxy = {\"http\": address, \"https\": address}\n return proxy", "def get_pool(self, **kwargs) -> type(set):\n anonymity = kwargs.get('anonymity', 'elite proxy').upper()\n https = kwargs.get('https', 'yes')\n proxy_pool = set()\n # Filter proxy pool as per anonymity or https requirements\n filtered = self.data_frame[\n (self.data_frame['anonymity'] == anonymity)\n & (self.data_frame['https'] == https)\n ]\n for ip, port in zip(filtered['ip'], filtered['port']):\n proxy_pool.add(f\"{ip}:{port}\")\n return proxy_pool", "def get_proxy(url):\n\tc_url = url.split('/')[0] + \"//\" + url.split('/')[2]\n\tdatabase = \"ETL_Config\"\n\ttable = \"EliteProxy\"\n\tproxy_id = pipeline.select(database=\"ETL_Config\", table=\"EliteProxy\", column=\"ProxyID\")\n\ti_d = \"\"\n\tif not proxy_id or int(len(proxy_id)) <= 3:\n\t\tscrap_proxy()\n\t\tget_proxy(url=c_url)\n\telse:\n\t\ti_d = proxy_id[random.randrange(int(len(proxy_id)))][0]\n\theaders = useragent.get_agent()\n\tproxy = validate_proxy(database=database, table=table, url=c_url, i_d=i_d, header=headers)\n\treturn proxy, headers", "def getProxy(self, rank):\n return self.proxies[rank]", "def get_proxy(proxy_total):\n\n config = cfg.get_config()\n conn = db.connect()\n\n xml_path = config[\"paths\"][\"xml_path\"]\n proxy_path = config[\"paths\"][\"proxy_path\"]\n tmp_checkin = config[\"paths\"][\"tmp\"]\n root_path = config[\"paths\"][\"root_path\"]\n\n rows = db.fetchall_proxy(\"assets\")\n\n proxy_count = 0\n\n for row in rows:\n rowid = row[0]\n guid = str(row[1])\n proxy_copied = row[22]\n guid_x = guid.replace(\"-\", \"\")\n guid_r = guid_x[24:]\n proxy_fn = guid + \".mov\"\n\n \"\"\"\n Use the parts GUID to generate a list that will be used to build the path to the proxy.\n \"\"\"\n n = 2\n glist = [guid_r[i : i + n] for i in range(0, len(guid_r), n)]\n\n proxy_fpath = os.path.join(proxy_path, glist[2], glist[3], guid, proxy_fn)\n\n if (\n proxy_count < int(proxy_total)\n and proxy_copied == 0\n and os.path.exists(proxy_fpath) is True\n ):\n\n try:\n pcopy = file_copy(proxy_fpath, tmp_checkin)\n\n if len(pcopy) == 0:\n row = db.fetchone_proxy(guid)\n db.update_column(\"assets\", \"proxy_copied\", 1, rowid)\n proxy_cp_msg = f\"{proxy_fn} was copied to the dalet tmp.\"\n logger.info(proxy_cp_msg)\n proxy_count += 1\n else:\n pass\n proxy_err_cp_msg = (\n f\"{proxy_fn} encountered an error on the copy to the dalet tmp.\"\n )\n logger.info(proxy_err_cp_msg)\n\n except Exception as e:\n proxy_excp_msg = f\"\\n\\\n Exception raised on the Proxy copy.\\n\\\n Error Message: {str(e)} \\n\\\n \"\n 
logger.exception(proxy_excp_msg)\n break\n else:\n if os.path.exists(proxy_fpath) is not True:\n proxy_err_msg = f\"Proxy path does not exist. \\n\\\n {proxy_fpath}\"\n logger.error(proxy_err_msg)\n db.update_column(\"assets\", \"proxy_copied\", 2, rowid)\n continue\n\n os.chdir(root_path)\n proxy_complete_msg = f\"PROXY COPY COMPLETE. \\n\\\n {proxy_count} proxies copied \\n\"\n\n logger.info(proxy_complete_msg)\n\n return", "def get_proxy() -> dict:\n try:\n proxy = {\"https\": (next(PROXY_LIST))}\n return proxy\n except StopIteration:\n # We'll return to the top of our list once we run out of proxies\n PROXY_LIST.seek(0)\n proxy = {\"https\": (next(PROXY_LIST))}\n return proxy", "def select_proxy(self, proxy):\n cmd = 'SELECT proxy FROM proxies WHERE proxy = ?;'\n self._safe_execute(cmd, proxy)\n return self.cursor", "def _get_proxies(url):\n \n try:\n r_obj = requests.get(url)\n except:\n proxy = 0\n while not type(proxy) == str:\n \n try:\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n ip = random.choice(parser.xpath('//tbody/tr'))\n if ip.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([ip.xpath('.//td[1]/text()')[0], ip.xpath('.//td[2]/text()')[0]])\n proxies = {\"http\": proxy, \"https\": proxy}\n \n except:\n continue\n \n r_obj = requests.get(url, proxies)\n \n return r_obj", "def get_proxy(self):\n response = requests.get(PROXY_POOL_URL)\n if response.status_code == 200:\n logger.debug(f'get proxy {response.text}')\n return response.text", "def obtain(proxy):\n return pickle.loads(pickle.dumps(proxy))", "def genpool(size):\n pool = list()\n for i in range(size):\n pool.append(random.randrange(1, 100))\n return pool", "def get_proxy (self):\n return self.proxy", "def _add_one_proxy(self, proxy_entry):\n random_time = random.random() / 100.0\n time.sleep(random_time)\n self._proxydb.add_new_proxy(proxy_entry[0], proxy_entry[1], proxy_entry[2], proxy_entry[3])", "def test09_lazy_member(self):\n p = LazyMemberResourcePool(POOL2, size=10, prefix='machine2')\n pool = MemcachePool(['localhost'], p, timeout=10)\n #pool.sync()\n i = pool.get('member1', ip_pool=[{'ip': '1.1.1.1'}])\n self.assertEqual(i.value, (IPAddress('1.1.1.1'), 20000))\n i = pool.get('member2', ip_pool=[{'ip': '1.1.1.1'}])\n self.assertEqual(i.value, (IPAddress('1.1.1.1'), 20001))\n i = pool.get('member1', ip_pool=[{'ip': '1.1.1.1'}])\n self.assertEqual(i.value, (IPAddress('1.1.1.1'), 20000))", "def get_random_server(servers):\n\n return 'https://{}.omegle.com'.format(random.choice(servers))", "def generate_proxies_queue(proxies):\n while True:\n yield proxies[0]\n used = proxies.popleft()\n proxies.append(used)", "def check_proxies():\n while True:\n proxy = q.get()\n res = test_proxy('http://' + proxy)\n if res:\n pl.append(proxy)\n print 'GOOD ' + proxy" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kospi stock buy order completed method
def post_koa_normal_buy_kp_ord(self, trcode, rqname, next): self.logger.info("kospi stock buy order is completed. (rqname: {})".format(rqname)) self.tr_ret_data = []
[ "def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def _buy(self):\r\n self._handleLogs(self.game.buy())\r\n self.redraw()", "def post_koa_normal_sell_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def check_buy(self, data):\r\n for my_position in self.position:\r\n high_price = data[my_position['code']]['high']\r\n value_that_we_have = my_position['value']\r\n \r\n # if one of our stocks has dropped by 5%, buy more of it in the hopes that it will go up\r\n if value_that_we_have * .95 >= high_price:\r\n self.buy(my_position['code'], high_price, my_position['total_invested'] * 0.025)\r\n \r\n rank_dict = {}\r\n # check for new stock\r\n for key in data:\r\n # if key doesnt exist in position \r\n if not any(key in pos for pos in self.position):\r\n diff = abs(data[key]['close'] - self.statbot.calc_bands(key)[0])\r\n if data[key][\"close\"] < self.statbot.calc_bands(key)[0] and self.statbot.get_rsi(key) <= 30:\r\n #access exchange api to purchase more stock\r\n self.add_buy(key, data[key][\"close\"], self.get_buy_amount())\r\n rank_dict[key] = self.get_score(BUY, self.statbot.get_rsi(key), diff)\r\n\r\n # check if buying any\r\n if len(self.buying) != 0:\r\n # sorts buying based on value of rank\r\n self.buying.sort(key = lambda x : -rank_dict[x['code']])", "async def buy(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need to set your handle using the `+register` command first.\"))\n return\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must buy atleast 1 stock.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n market = 0\n owned = 0\n owns = False\n for owner, quantity in info:\n if owner == ctx.author.id:\n owns = True\n owned = quantity\n if owner == -1:\n market = quantity\n if amount > market:\n await ctx.channel.send(embed=self.embed(\"You cannot buy more stocks than avaiable in the market!\"))\n return\n cost = amount * self.stock_value(rating)\n if cost > money:\n await ctx.channel.send(embed=self.embed(\"You do not have enough money to purchase %d stocks!\" % amount))\n return\n self.db.set_balance(ctx.author.id, money - cost)\n if owns:\n self.db.update_holding(ctx.author.id, stock, owned + amount)\n else:\n self.db.create_holding(ctx.author.id, stock, owned + amount)\n self.db.update_market(stock, market - amount)\n\n await ctx.channel.send(\n embed=self.embed(ctx.author.mention + \", Successfully purchased %d stocks of **%s** for **$%.2f!**\"\n \"\\n\\n Your new balance is **$%.2f**.\"\n % (amount, stock, cost, money-cost), 0x00FF00))", "def did_complete_buy_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def test_make_order(self):\n 
df_stock = self.quant.handle_data(self.quant.data[self.symbol], **self.hd_args)\n df_signal = self.quant.create_signal(df_stock, **self.cs_args)\n\n print 'symbol:', self.symbol\n for expire in (False, True):\n print 'expire set:', expire\n\n df_order = self.strategy.make_order(df_stock, df_signal, expire=expire, **self.args)\n df_order['diff'] = df_order['stock0'] - df_order['strike']\n\n print df_order.to_string(line_width=300)\n\n pct_chg = df_order['pct_chg']\n pct_chg = pct_chg[pct_chg < 10]\n print pct_chg.sum(), np.round(pct_chg.mean(), 2),\n print np.round(float(pct_chg[pct_chg > 0].count() / float(pct_chg.count())), 2),\n print np.round(float(pct_chg[pct_chg < 0].count() / float(pct_chg.count())), 2)\n\n print '-' * 100 + '\\n'", "def _buyLog(self, result):\r\n if result[0] == \"Buy\":\r\n self._createBuyWindow()\r\n if result[0] == \"Buy Success\":\r\n self._log(result[1])\r\n if self._buyWindow is not None:\r\n self._buyWindow.destroy()\r\n if result[0] == \"Buy Fail\":\r\n self._log(result[1])", "def check_for_trade(self):\n\n current_price: float = self.get_symbol_price()\n\n # Get the historal_data and append new current_price for reference.\n current_key: int = self.daily_history_data.keys()[-1]\n current_key += 1\n self.daily_history_data[current_key] = current_price\n\n\n # If we have not yet made any buy orders, then we can't sell anything.\n if self.buy_price == float(0):\n pass\n \n else:\n # If we have a buy price, we need to make sure we sell when we have a profit.\n percent_change: float = float(((current_price - self.buy_price) / self.buy_price) * 100)\n if percent_change > self.percent_sell:\n # Execute a sell order.\n # self.sell_crypto_order(self.get_symbol_positions())\n pass", "def __exec_buysell_order(self, order, market, open_exec_price, close_exec_price):\n result = False\n\n self.lock()\n\n base_asset = self.__get_or_add_asset(market.base)\n quote_asset = self.__get_or_add_asset(market.quote)\n\n quote_market = self._markets.get(quote_asset.symbol+quote_asset.quote)\n quote_exec_price = quote_market.price if quote_market else 1.0\n\n if order.direction == Position.LONG:\n # buy\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * open_exec_price # quote_market.adjust_quantity(base_qty * open_exec_price) if quote_market else self.adjust_quantity(base_qty * open_exec_price)\n\n # @todo free quantity\n if quote_qty > quote_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought quote asset quantity for %s with %s (have %s)!\" % (quote_asset.symbol, quote_qty, quote_asset.quantity))\n return False\n\n # retain the fee on the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty += commission_amount\n\n # base asset. 
it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, open_exec_price, base_qty, True, self.timestamp)\n # quote asset\n self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, False, self.timestamp)\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + full traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': open_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n elif order.direction == Position.SHORT:\n # sell\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * close_exec_price # quote_market.adjust_quantity(base_qty * close_exec_price) if quote_market else self.adjust_quantity(base_qty * close_exec_price)\n\n # @todo free quantity\n if base_qty > base_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought base asset quantity for %s with %s (have %s)!\" % (\n base_asset.symbol, market.format_quantity(base_qty), market.format_quantity(base_asset.quantity)))\n\n return False\n\n # retain the fee from the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty -= commission_amount\n\n # approximation of the profit/loss according to the average price of the base asset\n delta_price = close_exec_price - base_asset.price\n\n # it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, close_exec_price, base_qty, False, self.timestamp)\n # quote asset\n position_gain_loss_currency = self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, True, 
self.timestamp)\n\n gain_loss_rate = ((close_exec_price - base_asset.price) / base_asset.price) if base_asset.price else 0.0\n position_gain_loss = delta_price * base_qty\n position_gain_loss_currency *= gain_loss_rate\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance, delta_price/market.one_pip_means,\n gain_loss_rate, position_gain_loss, position_gain_loss_currency)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + fully traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': close_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n return result", "def on_end_buying_round(self, player_state, round_history):\n pass", "def execute_order(self, event):\n\n if event.type == 'ORDER':\n # Prepare the parameters for the asset order\n asset = event.symbol\n asset_type = \"STK\"\n order_type = event.order_type\n quantity = event.quantity\n direction = event.direction\n\n # Create the Interactive Brokers contract via the passed Order\n # event\n ib_contract = sefl.create_contract(\n asset, asset_type, self.order_routing, self.order_routing,\n self.currency\n )\n # Create the Interactive Brokers order via the passed Order event\n ib_order = self.create_order(\n order_type, quantity, direction\n )\n\n # Use the connection to send the order to IB\n self.tws_conn.placeOrder(\n self.order_id, ib_contract, ib_order\n )\n\n # NOTE: The following line is essential to ensure that orders\n # connect and collect server responses appropriately. In essence a\n # one second delay between filling and returning order details\n # ensures that each order processes optimally. 
Without this I've\n # witnessed the process crash.\n time.sleep(1)\n\n # Increment the order ID for this ordering session\n self.order_id += 1", "def update_order(self, event):\n if event.type == 'ORDER':\n # event.print_order()\n cur_quantity = self.current_positions[event.symbol]\n if event.direction == 'EXIT':\n assert(cur_quantity != 0)\n assert(event.quantity == 0)\n if cur_quantity > 0:\n event.direction = \"SELL\"\n else:\n event.direction = \"BUY\"\n event.quantity = abs(cur_quantity)\n elif event.direction == \"ALLBUY\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"BUY\"\n elif event.direction == \"ALLSELL\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"SELL\"\n\n self.update_positions_from_order(event)\n self.update_holdings_from_order(event)", "def callAutoTransaction(dealtype,ParameterList):\r\n\ttime.sleep(1)\r\n\t[stockcode,atprice,vol] = ParameterList\r\n\tfillOrderData(dealtype,stockcode,atprice,vol)\r\n\tlogger.info(\"\\n +++==stock deal %s @ ++==\"%dealtype)\r\n\tlogger.info(\"+++==stock deal Parameters===> \\n %s \"%ParameterList)", "async def order_market_buy(self, **params):\r\n return await self.client_helper(\"order_market_buy\", **params)", "def soldout():", "def buy(self, date, shares, price):\n # step 1\n fee = self.broker.calcFee(shares, price)\n # step 2\n order_volume = shares * price\n # step 3\n if self.broker.balance < ( order_volume + fee ) :\n # zero transaction\n shares = 0\n fee = 0\n order_volume = shares * price\n # step 4\n self.orderbook.addTransaction(date, 'buy', self.__stock, shares, price, fee)\n self.broker.balance -= order_volume + fee" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kosdaq stock buy order completed method
def post_koa_normal_buy_kq_ord(self, trcode, rqname, next): self.logger.info("kosdaq stock buy order is completed. (rqname: {})".format(rqname)) self.tr_ret_data = []
[ "def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def did_complete_buy_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def post_koa_normal_sell_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def _buy(self):\r\n self._handleLogs(self.game.buy())\r\n self.redraw()", "def did_complete_sell_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def __exec_buysell_order(self, order, market, open_exec_price, close_exec_price):\n result = False\n\n self.lock()\n\n base_asset = self.__get_or_add_asset(market.base)\n quote_asset = self.__get_or_add_asset(market.quote)\n\n quote_market = self._markets.get(quote_asset.symbol+quote_asset.quote)\n quote_exec_price = quote_market.price if quote_market else 1.0\n\n if order.direction == Position.LONG:\n # buy\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * open_exec_price # quote_market.adjust_quantity(base_qty * open_exec_price) if quote_market else self.adjust_quantity(base_qty * open_exec_price)\n\n # @todo free quantity\n if quote_qty > quote_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought quote asset quantity for %s with %s (have %s)!\" % (quote_asset.symbol, quote_qty, quote_asset.quantity))\n return False\n\n # retain the fee on the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty += commission_amount\n\n # base asset. 
it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, open_exec_price, base_qty, True, self.timestamp)\n # quote asset\n self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, False, self.timestamp)\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + full traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': open_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n elif order.direction == Position.SHORT:\n # sell\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * close_exec_price # quote_market.adjust_quantity(base_qty * close_exec_price) if quote_market else self.adjust_quantity(base_qty * close_exec_price)\n\n # @todo free quantity\n if base_qty > base_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought base asset quantity for %s with %s (have %s)!\" % (\n base_asset.symbol, market.format_quantity(base_qty), market.format_quantity(base_asset.quantity)))\n\n return False\n\n # retain the fee from the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty -= commission_amount\n\n # approximation of the profit/loss according to the average price of the base asset\n delta_price = close_exec_price - base_asset.price\n\n # it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, close_exec_price, base_qty, False, self.timestamp)\n # quote asset\n position_gain_loss_currency = self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, True, 
self.timestamp)\n\n gain_loss_rate = ((close_exec_price - base_asset.price) / base_asset.price) if base_asset.price else 0.0\n position_gain_loss = delta_price * base_qty\n position_gain_loss_currency *= gain_loss_rate\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance, delta_price/market.one_pip_means,\n gain_loss_rate, position_gain_loss, position_gain_loss_currency)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + fully traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': close_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n return result", "def test_make_order(self):\n df_stock = self.quant.handle_data(self.quant.data[self.symbol], **self.hd_args)\n df_signal = self.quant.create_signal(df_stock, **self.cs_args)\n\n print 'symbol:', self.symbol\n for expire in (False, True):\n print 'expire set:', expire\n\n df_order = self.strategy.make_order(df_stock, df_signal, expire=expire, **self.args)\n df_order['diff'] = df_order['stock0'] - df_order['strike']\n\n print df_order.to_string(line_width=300)\n\n pct_chg = df_order['pct_chg']\n pct_chg = pct_chg[pct_chg < 10]\n print pct_chg.sum(), np.round(pct_chg.mean(), 2),\n print np.round(float(pct_chg[pct_chg > 0].count() / float(pct_chg.count())), 2),\n print np.round(float(pct_chg[pct_chg < 0].count() / float(pct_chg.count())), 2)\n\n print '-' * 100 + '\\n'", "def execute_order(self, event):\n\n if event.type == 'ORDER':\n # Prepare the parameters for the asset order\n asset = event.symbol\n asset_type = \"STK\"\n order_type = event.order_type\n quantity = event.quantity\n direction = event.direction\n\n # Create the Interactive Brokers contract via the passed Order\n # event\n ib_contract = sefl.create_contract(\n asset, asset_type, self.order_routing, self.order_routing,\n self.currency\n )\n # Create the Interactive Brokers order via the passed Order event\n ib_order = self.create_order(\n order_type, quantity, direction\n )\n\n # Use the 
connection to send the order to IB\n self.tws_conn.placeOrder(\n self.order_id, ib_contract, ib_order\n )\n\n # NOTE: The following line is essential to ensure that orders\n # connect and collect server responses appropriately. In essence a\n # one second delay between filling and returning order details\n # ensures that each order processes optimally. Without this I've\n # witnessed the process crash.\n time.sleep(1)\n\n # Increment the order ID for this ordering session\n self.order_id += 1", "async def buy(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need to set your handle using the `+register` command first.\"))\n return\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must buy atleast 1 stock.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n market = 0\n owned = 0\n owns = False\n for owner, quantity in info:\n if owner == ctx.author.id:\n owns = True\n owned = quantity\n if owner == -1:\n market = quantity\n if amount > market:\n await ctx.channel.send(embed=self.embed(\"You cannot buy more stocks than avaiable in the market!\"))\n return\n cost = amount * self.stock_value(rating)\n if cost > money:\n await ctx.channel.send(embed=self.embed(\"You do not have enough money to purchase %d stocks!\" % amount))\n return\n self.db.set_balance(ctx.author.id, money - cost)\n if owns:\n self.db.update_holding(ctx.author.id, stock, owned + amount)\n else:\n self.db.create_holding(ctx.author.id, stock, owned + amount)\n self.db.update_market(stock, market - amount)\n\n await ctx.channel.send(\n embed=self.embed(ctx.author.mention + \", Successfully purchased %d stocks of **%s** for **$%.2f!**\"\n \"\\n\\n Your new balance is **$%.2f**.\"\n % (amount, stock, cost, money-cost), 0x00FF00))", "def _buyLog(self, result):\r\n if result[0] == \"Buy\":\r\n self._createBuyWindow()\r\n if result[0] == \"Buy Success\":\r\n self._log(result[1])\r\n if self._buyWindow is not None:\r\n self._buyWindow.destroy()\r\n if result[0] == \"Buy Fail\":\r\n self._log(result[1])", "def callAutoTransaction(dealtype,ParameterList):\r\n\ttime.sleep(1)\r\n\t[stockcode,atprice,vol] = ParameterList\r\n\tfillOrderData(dealtype,stockcode,atprice,vol)\r\n\tlogger.info(\"\\n +++==stock deal %s @ ++==\"%dealtype)\r\n\tlogger.info(\"+++==stock deal Parameters===> \\n %s \"%ParameterList)", "def check_buy(self, data):\r\n for my_position in self.position:\r\n high_price = data[my_position['code']]['high']\r\n value_that_we_have = my_position['value']\r\n \r\n # if one of our stocks has dropped by 5%, buy more of it in the hopes that it will go up\r\n if value_that_we_have * .95 >= high_price:\r\n self.buy(my_position['code'], high_price, my_position['total_invested'] * 0.025)\r\n \r\n rank_dict = {}\r\n # check for new stock\r\n for key in data:\r\n # if key doesnt exist in position \r\n if not any(key in pos for pos in self.position):\r\n diff = abs(data[key]['close'] - self.statbot.calc_bands(key)[0])\r\n if data[key][\"close\"] < self.statbot.calc_bands(key)[0] and self.statbot.get_rsi(key) <= 30:\r\n #access exchange api to purchase more stock\r\n self.add_buy(key, data[key][\"close\"], 
self.get_buy_amount())\r\n rank_dict[key] = self.get_score(BUY, self.statbot.get_rsi(key), diff)\r\n\r\n # check if buying any\r\n if len(self.buying) != 0:\r\n # sorts buying based on value of rank\r\n self.buying.sort(key = lambda x : -rank_dict[x['code']])", "def buy(self, date, shares, price):\n # step 1\n fee = self.broker.calcFee(shares, price)\n # step 2\n order_volume = shares * price\n # step 3\n if self.broker.balance < ( order_volume + fee ) :\n # zero transaction\n shares = 0\n fee = 0\n order_volume = shares * price\n # step 4\n self.orderbook.addTransaction(date, 'buy', self.__stock, shares, price, fee)\n self.broker.balance -= order_volume + fee", "def update_order(self, event):\n if event.type == 'ORDER':\n # event.print_order()\n cur_quantity = self.current_positions[event.symbol]\n if event.direction == 'EXIT':\n assert(cur_quantity != 0)\n assert(event.quantity == 0)\n if cur_quantity > 0:\n event.direction = \"SELL\"\n else:\n event.direction = \"BUY\"\n event.quantity = abs(cur_quantity)\n elif event.direction == \"ALLBUY\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"BUY\"\n elif event.direction == \"ALLSELL\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"SELL\"\n\n self.update_positions_from_order(event)\n self.update_holdings_from_order(event)", "def order_complete(self):\n\n try:\n cart = self.cart\n finance = cart['finance']\n if cart['cart_status']['cart_status_id'] != STATUS_INPROCESS:\n raise CartInvalid(\"Order is not in process\")\n if finance['total_cost'] < 0.0:\n print \"{}: attempt to complete a cart with a total cost of {}\".format(cart['cart_id'], finance['total_cost'])\n raise CartInvalid(\"Cart price is less than 0.\")\n if cart['transaction_amount'] > 0 and cart['total_cost'] > 0:\n # assume we need to settle here\n self.capture(cart['total_cost'])\n\n self.set_status_id(STATUS_COMPLETE)\n c = get_cursor()\n c.execute(\"\"\"\n update cart\n set complete_date = now()\n where cart_id = %s\"\"\",\n ( self.cart['cart_id'],))\n self.log(\"Cart Completed.\")\n c.execute(\"\"\"\n select complete_date\n from cart\n where cart_id = %s\"\"\",\n (self.cart['cart_id'],))\n self.cart['complete_date'] = c.fetchone()['complete_date']\n try:\n self.complete_email()\n self.log(\"Order Complete email sent to {}\".format(self.cart['address']['email']))\n\n except Exception as e:\n self.log(\"Could not send order complete email: {}\".format(e.args[0]))\n except CartInvalid as e:\n raise CartInvalid(e)\n except CartIncomplete as e:\n raise CartIncomplete(e)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")", "def create_spare_purchase_order(self,cr, uid, ids, context=None):\n print\"================================================\"\n picking_obj = self.pool.get('stock.picking')\n stock_move = self.pool.get('stock.move')\n purchase_obj = self.pool.get('purchase.order')\n rec=self.browse(cr, uid, ids)[0]\n qoute_ids = [qoute.id for qoute in rec.q_ids if qoute.state == 'done']\n if not rec.hq:\n if[ir for ir in self.browse(cr, uid, ids) if purchase_obj.search(cr, uid, 
[('ir_id','=',ir.id)])]:\n raise osv.except_osv(_('Purchase Order(s) Exsits !'), _('The Purchase Order(s) from this purchase requesition was alreadry created..\\n Please .. Check Purchase Orders List ..'))\n else:\n purchase_id = self.pool.get('pur.quote').make_purchase_order(cr, uid, qoute_ids)\n print\">>>>>>>>>>>>>>>>>>>>>>>>purchase_id\",purchase_id\n purchase_obj.write(cr, uid, purchase_id, {'location_id':rec.location_id.id}, context=context)\n self.write(cr, uid, ids, {'state':'wait_purchase','purchase_id':purchase_id[0]}, context=context) \n else:\n quote=self.pool.get('pur.quote').browse(cr, uid, qoute_ids)[0]\n pick_id = picking_obj.create(cr, uid , {\n 'type': 'in',\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),\n 'origin': rec.name,\n 'date': rec.ir_date,\n 'executing_agency': rec.executing_agency,\n 'partner_id': quote.supplier_id.id,\n 'state': 'draft',\n 'department_id':rec.department_id.id,\n 'move_lines' : [],\n 'maintenance':True,\n })\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>pick_id\",pick_id\n for pro in quote.pq_pro_ids:\n move_id = stock_move.create(cr, uid, {\n 'name':pro.name,\n 'picking_id': pick_id,\n 'product_id': pro.product_id.id,\n 'product_qty': pro.product_qty,\n 'product_uos_qty': pro.product_id.uom_id.id,\n 'product_uos': pro.product_id.uom_id.id,\n 'product_uom': pro.product_id.uom_id.id,\n 'location_id': quote.supplier_id.property_stock_supplier.id,\n 'location_dest_id': rec.location_id.id,\n 'price_unit': pro.price_unit,\n 'state': 'draft',\n 'type':'in', \n }) \n self.write(cr, uid, ids, {'picking_id':pick_id}, context=context)\n self.write(cr, uid, ids, {'state':'purchase_officer'}, context=context)\n print\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>move_id\",move_id\n return True", "async def order_oco_buy(self, **params):\r\n return await self.client_helper(\"order_oco_buy\", **params)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kospi stock sell order completed method
def post_koa_normal_sell_kp_ord(self, trcode, rqname, next): self.logger.info("kospi stock sell order is completed. (rqname: {})".format(rqname)) self.tr_ret_data = []
[ "def post_koa_normal_sell_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def soldout():", "def sell(self, date, shares, price):\n # step 1\n # sell given shares or all\n total_shares = self.totalStockinDepot()\n shares = min(shares, total_shares)\n # step 2\n fee = self.broker.calcFee(shares, price)\n # step 2\n order_volume = shares * price\n # step 4\n self.orderbook.addTransaction(date, 'sell', self.__stock, shares, price, fee)\n # step 5\n self.broker.balance += (order_volume - fee)\n\n # step 6\n tax = self.broker.calcTax(self.orderbook, self.stock)\n if self.__isNaN(tax) :\n tax = 0\n\n if tax > 0 :\n delta_tax = tax - self.TAX\n self.TAX = tax # overall tax\n self.broker.balance -= delta_tax\n else :\n loss_tax = tax\n if tax == 0:\n loss_tax = 0-self.TAX\n self.broker.balance -= max(loss_tax, 0-self.TAX)\n self.TAX += loss_tax", "async def sell(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need to set your handle using the `+register` command first.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n owned = 0\n market = 0\n for owner, quantity in info:\n if owner == ctx.author.id:\n owned = quantity\n if owner == -1:\n market = quantity\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must sell at least 1 stock.\", 0xFF0000))\n return\n if amount > owned:\n await ctx.channel.send(embed=self.embed(\"You cannot sell more stocks than you own.\", 0xFF0000))\n return\n\n profit = self.stock_value(rating) * amount\n self.db.set_balance(ctx.author.id, money + profit)\n self.db.update_holding(ctx.author.id, stock, owned-amount)\n self.db.update_market(stock, market+amount)\n await ctx.channel.send(embed=self.embed(ctx.author.mention+\", Successfully sold %d stocks of **%s** for $%.2f!\"\n % (amount, stock, profit), 0x00FF00))", "def did_complete_sell_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def set_orders(self):\n new_buy_orders, new_sell_orders = api.get_orders(self.currency_pair)\n\n # check if the sell book isn't empty\n if new_sell_orders != []:\n log = 'new_sell_orders : ', new_sell_orders # number of new sell orders\n logging.info(log)\n # remove all sell orders under sell_price_min\n if new_sell_orders[0][2] < self.sell_price_min: # order[2] => rate\n for order in new_sell_orders:\n if order[2] < self.sell_price_min:\n resp = api.cancel_order(self.currency_pair, order[0]) # order[0] => order_number\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # remove orders if there too much of them\n # checking if the rate of the last order is too big than the\n # supposed right rate relatively to both the increment and 
nb_order_to_display variables\n if new_sell_orders[-1][2] > self.sell_price_min + self.increment * self.nb_orders_to_display:\n # if so, defining a variable corresponding to the right rate\n price_target = self.sell_price_min + self.increment * self.nb_orders_to_display\n\n # removing the order if greater than the supposed right price\n for order in new_sell_orders:\n if order[2] > price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # if it remain sells orders\n if new_sell_orders != []:\n i = 0\n target = len(new_sell_orders)\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n log = 'new_sell_orders : ', new_sell_orders\n logging.info(log)\n # check if the first item in new_sell_orders is at sell_price_min\n # or add it\n if new_sell_orders[0][2] != self.sell_price_min:\n # api.set_sell_order is not better?\n order = api.set_sell_order(self.currency_pair, self.sell_price_min, self.amount)\n\n new_sell_orders.insert(0, order)\n\n log = 'Sell order added : ', order\n logging.warning(log)\n\n # incrementing target for the while loop? => because the exclusion of the last integer if not?\n target += 1\n # browse sell_orders to add or removes orders\n while i < target:\n # check for overflow\n if new_sell_orders[i][2] + self.increment > self.sell_price_max:\n i = target\n logging.warning('sell_price_max reached')\n\n else:\n # add a sell order if there is no higher sell in sell_orders\n if i + 1 >= len(new_sell_orders): # possible change : less than sign instead of 'greater than'\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # remove sell order if there is less than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n < self.increment:\n\n resp = api.cancel_order(self.currency_pair, new_sell_orders[i + 1][0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n\n target -= 1\n # add sell order if there is more than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n > self.increment:\n\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # increment ok, next round\n else:\n i += 1\n\n self.sell_orders = new_sell_orders[:]\n\n if new_sell_orders == []:\n price_start = self.sell_price_min\n\n logging.warning('no active sell orders')\n\n # set the number of sell orders to execute and check if no more than nb_orders_to_display\n # personal note : recheck the meaning of that condition\n if (self.sell_price_max - self.sell_price_min) / self.increment > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.sell_price_max - self.sell_price_min) / self.increment)\n\n log = i, 'sell order to add from : ', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n sell_orders_executed = api.set_several_sell_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.sell_orders = sell_orders_executed[:]\n\n # When there 
is orders(s) in new_buy_orders\n if new_buy_orders != []:\n log = 'new_buy_orders : ', new_buy_orders\n logging.info(log)\n # Remove orders with price superior to buy_price_max.\n if new_buy_orders[-1][2] > self.buy_price_max:\n for order in new_buy_orders:\n if order[2] > self.buy_price_max:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # Remove orders with price under our target\n # Why not set 'buy_price_min'? for the comparison\n if new_buy_orders[0][2] < self.buy_price_max - self.increment * self.nb_orders_to_display:\n\n price_target = self.buy_price_max - self.increment * self.nb_orders_to_display\n\n for order in new_buy_orders:\n if order[2] < price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # If it remain buy(s) order(s)\n if new_buy_orders != []:\n i = 0\n target = len(new_buy_orders)\n # Add a buy order when the price of the first item in new_buy_orders\n # is not good\n # Why not set 'buy_price_min' for the comparison ?\n if new_buy_orders[0][2] != self.buy_price_max - self.increment \\\n * self.nb_orders_to_display:\n order = api.set_buy_order(self.currency_pair, (self.buy_price_max \\\n - self.increment * self.nb_orders_to_display),\n self.amount)\n\n new_buy_orders.insert(0, order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n target += 1\n # Browse buy_orders to add or remove orders\n while i < target:\n # Add buy orders when there is no higher buy in buy_orders\n if i + 1 >= len(new_buy_orders):\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Remove buy order where there is less than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] < self.increment:\n resp = api.cancel_order(self.currency_pair, new_buy_orders[i + 1][0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n\n target -= 1\n # Add buy order when there is more than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] > self.increment:\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Increment ok, next round.\n else:\n i += 1\n\n self.buy_orders = new_buy_orders[:]\n\n # Add buy orders when new_buy_orders is empty\n if new_buy_orders == []:\n price_start = self.buy_price_max\n logging.warning('No active buy orders')\n # set the number of buy orders to execute and check if no more than\n # nb_orders_to_display\n if (self.buy_price_max - self.buy_price_min) / self.increment \\\n > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.buy_price_max - self.buy_price_min) / self.increment)\n\n # change: simplifying because i is an integer => Decimal(str(i)) should not be needed\n log = i, 'add 
buy orders from', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n buy_orders_executed = api.set_several_buy_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.buy_orders = buy_orders_executed[:]", "def on_end_buying_round(self, player_state, round_history):\n pass", "def sell_order_btc(self, asset, price, quantity):\n precision_asset = 8\n precision_unit = 8\n\n symbol_info = self.get_symbol_info(asset)\n baseAsset = symbol_info['baseAsset']\n price_fileter = float(symbol_info['filters'][0]['tickSize'])\n for i in range(1, 9):\n n = price_fileter * (10 ** i)\n if n == 1:\n precision_asset = i\n price_to_sell = \"{:0.0{}f}\".format(price, precision_asset) # price of asset in format\n print(price_to_sell)\n\n quantity_fileter = float(symbol_info['filters'][1]['minQty'])\n for i in range(1, 9):\n n = quantity_fileter * (10 ** i)\n if n == 1:\n precision_unit = i\n quantity_to_sell = \"{:0.0{}f}\".format(quantity, precision_unit) # quantity of asset in format\n print(quantity_to_sell)\n\n current_asset = self.client.get_asset_balance(asset=baseAsset)\n float_asset = float(current_asset['free'])\n if float_asset >= quantity:\n print('Quantity of {} is {} and we have enough to sell {} of {}'.format(baseAsset, float_asset,\n quantity_to_sell, baseAsset))\n if self.test_asset == 0:\n order = self.client.order_limit_sell(\n symbol=asset, #:param symbol: required\n quantity=quantity_to_sell, #:param quantity: required, :type quantity: decimal\n price=price_to_sell, ) #:param price: required,\t:type price: str\n\n print('sell order was placed, price {} and quantity of {} is {} '.format(price_to_sell, asset,\n quantity_to_sell))\n else:\n order = self.client.create_test_order(\n symbol=asset,\n side=self.client.SIDE_SELL,\n type=self.client.ORDER_TYPE_LIMIT,\n timeInForce=self.client.TIME_IN_FORCE_GTC,\n quantity=quantity_to_sell,\n price=price_to_sell)\n\n print('test sell order is done, test_asset = 1')\n else:\n print('Quantity of {} is {} and we have NOT enough to sell {} of {}'.format(baseAsset, float_asset,\n quantity_to_sell, baseAsset))", "def check_sell(self, data={}):\r\n \r\n to_sell = []\r\n rank_dict = {}\r\n for my_position in self.position:\r\n \r\n # compare current_price with value\r\n actual_value = my_position['current_price'] * my_position['num_shares']\r\n bought_value = my_position['total_invested']\r\n # check if current price significantly dropped from bought\r\n if bought_value * (1 - self.stop_loss) >= actual_value:\r\n to_sell.append(my_position)\r\n #rank the coin based on distance from bought value to ensure priority over other sell conditions \r\n rank_dict[my_position['code']] = actual_value - bought_value\r\n elif bought_value * self.profit_take <= actual_value:\r\n to_sell.append(my_position)\r\n # rank the coin based on the gain of selling\r\n rank_dict[my_position['code']] = bought_value - actual_value\r\n elif data[my_position[\"code\"]][\"close\"] >= self.statbot.calc_bands(my_position[\"code\"])[1] and self.statbot.get_rsi(my_position[\"code\"]) >= 70:\r\n diff = abs(data[my_position[\"code\"]][\"close\"] - self.statbot.calc_bands(my_position[\"code\"])[1])\r\n to_sell.append(my_position)\r\n #rank the coin based on the score calculated in get score using difference between bands and rsi\r\n rank_dict[my_position['code']] = self.get_score(SELL, self.statbot.get_rsi(my_position['code']), diff)\r\n \r\n for my_position in to_sell:\r\n self.sell(my_position['code'], my_position['current_price'])\r\n 
\r\n if len(self.selling) != 0:\r\n # sorts buying based on value of rank\r\n self.selling.sort(key = lambda x : rank_dict[x['code']])", "def __exec_buysell_order(self, order, market, open_exec_price, close_exec_price):\n result = False\n\n self.lock()\n\n base_asset = self.__get_or_add_asset(market.base)\n quote_asset = self.__get_or_add_asset(market.quote)\n\n quote_market = self._markets.get(quote_asset.symbol+quote_asset.quote)\n quote_exec_price = quote_market.price if quote_market else 1.0\n\n if order.direction == Position.LONG:\n # buy\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * open_exec_price # quote_market.adjust_quantity(base_qty * open_exec_price) if quote_market else self.adjust_quantity(base_qty * open_exec_price)\n\n # @todo free quantity\n if quote_qty > quote_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought quote asset quantity for %s with %s (have %s)!\" % (quote_asset.symbol, quote_qty, quote_asset.quantity))\n return False\n\n # retain the fee on the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty += commission_amount\n\n # base asset. it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, open_exec_price, base_qty, True, self.timestamp)\n # quote asset\n self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, False, self.timestamp)\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + full traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': open_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, 
\"\"))\n\n elif order.direction == Position.SHORT:\n # sell\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * close_exec_price # quote_market.adjust_quantity(base_qty * close_exec_price) if quote_market else self.adjust_quantity(base_qty * close_exec_price)\n\n # @todo free quantity\n if base_qty > base_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought base asset quantity for %s with %s (have %s)!\" % (\n base_asset.symbol, market.format_quantity(base_qty), market.format_quantity(base_asset.quantity)))\n\n return False\n\n # retain the fee from the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty -= commission_amount\n\n # approximation of the profit/loss according to the average price of the base asset\n delta_price = close_exec_price - base_asset.price\n\n # it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, close_exec_price, base_qty, False, self.timestamp)\n # quote asset\n position_gain_loss_currency = self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, True, self.timestamp)\n\n gain_loss_rate = ((close_exec_price - base_asset.price) / base_asset.price) if base_asset.price else 0.0\n position_gain_loss = delta_price * base_qty\n position_gain_loss_currency *= gain_loss_rate\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance, delta_price/market.one_pip_means,\n gain_loss_rate, position_gain_loss, position_gain_loss_currency)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + fully traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': close_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, 
(order.symbol, order.order_id, \"\"))\n\n return result", "def _trade(self, price, quant, obj, nextpos):\r\n\r\n ## trace('__________________ _trade', price, quant, obj, booking)\r\n\r\n # A list of (trade-object, quantity) booked.\r\n booked = []\r\n\r\n # Total size booked during this trade.\r\n total_booked = 0\r\n\r\n # \"Real\" PnL for the booked trades.\r\n real_pnl = 0\r\n\r\n # Book the new trade against existing positions if the trade is not on\r\n # the same side as our current position.\r\n position = self.position()\r\n if quant * position < 0:\r\n\r\n # Process all the positions.\r\n done = 0\r\n while self.positions:\r\n pos = nextpos(self)\r\n if abs(quant) >= abs(pos.size):\r\n # This position is entirely consumed by the trade.\r\n booked_quant = pos.size\r\n self.positions.remove(pos) # This may become slow.\r\n else:\r\n # This position is only partially consumed by the trade.\r\n booked_quant = -quant\r\n pos.size += quant\r\n done = 1\r\n\r\n quant += booked_quant\r\n total_booked += booked_quant\r\n booked.append( (pos.obj, booked_quant) )\r\n\r\n real_pnl += booked_quant * (price - pos.price)\r\n if done or quant == 0:\r\n break\r\n\r\n assert quant * self.position() >= 0\r\n\r\n # Price the booked trades into the realized PnL, depending on method.\r\n if self.pricing_method is PRICING_REAL:\r\n realized_pnl = real_pnl\r\n else:\r\n if position == 0:\r\n assert total_booked == 0, total_booked\r\n realized_pnl = 0\r\n else:\r\n realized_cost = self.L((total_booked*self.F(self.cost4avg))/position)\r\n realized_pnl = total_booked * price - realized_cost\r\n self.cost4avg -= realized_cost\r\n self._realized_pnl += realized_pnl\r\n if total_booked == 0:\r\n assert realized_pnl == 0, realized_pnl\r\n else:\r\n booked.append( (obj, -total_booked) )\r\n \r\n # Append the remainder of our trade to the inventory if not all was\r\n # booked.\r\n if quant != 0:\r\n newpos = Position(self, price, quant, obj)\r\n self.positions.append(newpos)\r\n if self.pricing_method is PRICING_AVERAGE:\r\n self.cost4avg += newpos.cost()\r\n\r\n self.last_trade_price = price\r\n return booked, realized_pnl", "def sellStock(self, asset, amount): # sellStock function \n amount_f = float(amount)\t\t\t\t\t\t\t \n if self.stock < amount_f:\t\t\t\t\t\t\t # if there is not enough stocks to sell\t\n self.tr += \"Failed to sell the stock\"\t\t\t\t # record of failed transaction\n return \"Not enough stocks in portfolio\" \n if amount != int(amount): # if the amount input is not proper\n self.tr += \"Failed to sell the stock\" # record of failed transaction\n return \"You can only sell stocks as whole\"\n else: \n self.stock -= amount_f # subtract from stocks when you can sell\n self.cash += amount_f *20* random.uniform(0.5, 1.5) # add the corr. 
amount to cash\n # I couldn't figure out how to integrate price here, so I used the price in example\n self.tr += \"Sold {0} Stock with symbol {1}\\n\".format(amount, asset)", "def check_buy(self, data):\r\n for my_position in self.position:\r\n high_price = data[my_position['code']]['high']\r\n value_that_we_have = my_position['value']\r\n \r\n # if one of our stocks has dropped by 5%, buy more of it in the hopes that it will go up\r\n if value_that_we_have * .95 >= high_price:\r\n self.buy(my_position['code'], high_price, my_position['total_invested'] * 0.025)\r\n \r\n rank_dict = {}\r\n # check for new stock\r\n for key in data:\r\n # if key doesnt exist in position \r\n if not any(key in pos for pos in self.position):\r\n diff = abs(data[key]['close'] - self.statbot.calc_bands(key)[0])\r\n if data[key][\"close\"] < self.statbot.calc_bands(key)[0] and self.statbot.get_rsi(key) <= 30:\r\n #access exchange api to purchase more stock\r\n self.add_buy(key, data[key][\"close\"], self.get_buy_amount())\r\n rank_dict[key] = self.get_score(BUY, self.statbot.get_rsi(key), diff)\r\n\r\n # check if buying any\r\n if len(self.buying) != 0:\r\n # sorts buying based on value of rank\r\n self.buying.sort(key = lambda x : -rank_dict[x['code']])", "async def order_oco_sell(self, **params):\r\n return await self.client_helper(\"order_oco_sell\", **params)", "def handleNewOrder( order, event ):\n \n if event.destination != icore.workflow_states.order.finance.CHARGEABLE:\n return\n\n if not event.source in ( icore.workflow_states.order.finance.REVIEWING, ):\n return \n\n if icore.IShippableOrder.providedBy( order ):\n order.shipments = shipment.OrderShipments()\n \n for item in order.shopping_cart.values():\n if not ( icore.IShippableLineItem.providedBy( item ) \n and icore.IPayableLineItem.providedBy( item ) ):\n continue\n\n payable = item.resolve()\n if payable is None:\n continue\n \n inventory = interfaces.IProductInventory( payable )\n inventory.store_stock -= item.quantity\n \n if inventory.store_stock < 0 and (inventory.store_stock + item.quantity) >= 0:\n notify( interfaces.InventoryBackordered( inventory, payable ) )\n\n notify(\n interfaces.InventoryAvailabilityModified( inventory, payable, order, item.quantity )\n )", "def check_for_trade(self):\n\n current_price: float = self.get_symbol_price()\n\n # Get the historal_data and append new current_price for reference.\n current_key: int = self.daily_history_data.keys()[-1]\n current_key += 1\n self.daily_history_data[current_key] = current_price\n\n\n # If we have not yet made any buy orders, then we can't sell anything.\n if self.buy_price == float(0):\n pass\n \n else:\n # If we have a buy price, we need to make sure we sell when we have a profit.\n percent_change: float = float(((current_price - self.buy_price) / self.buy_price) * 100)\n if percent_change > self.percent_sell:\n # Execute a sell order.\n # self.sell_crypto_order(self.get_symbol_positions())\n pass", "def display_stock():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kosdaq stock sell order completed method
def post_koa_normal_sell_kq_ord(self, trcode, rqname, next): self.logger.info("kosdaq stock sell order is completed. (rqname: {})".format(rqname)) self.tr_ret_data = []
[ "def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def did_complete_sell_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def sell(self, date, shares, price):\n # step 1\n # sell given shares or all\n total_shares = self.totalStockinDepot()\n shares = min(shares, total_shares)\n # step 2\n fee = self.broker.calcFee(shares, price)\n # step 2\n order_volume = shares * price\n # step 4\n self.orderbook.addTransaction(date, 'sell', self.__stock, shares, price, fee)\n # step 5\n self.broker.balance += (order_volume - fee)\n\n # step 6\n tax = self.broker.calcTax(self.orderbook, self.stock)\n if self.__isNaN(tax) :\n tax = 0\n\n if tax > 0 :\n delta_tax = tax - self.TAX\n self.TAX = tax # overall tax\n self.broker.balance -= delta_tax\n else :\n loss_tax = tax\n if tax == 0:\n loss_tax = 0-self.TAX\n self.broker.balance -= max(loss_tax, 0-self.TAX)\n self.TAX += loss_tax", "def __exec_buysell_order(self, order, market, open_exec_price, close_exec_price):\n result = False\n\n self.lock()\n\n base_asset = self.__get_or_add_asset(market.base)\n quote_asset = self.__get_or_add_asset(market.quote)\n\n quote_market = self._markets.get(quote_asset.symbol+quote_asset.quote)\n quote_exec_price = quote_market.price if quote_market else 1.0\n\n if order.direction == Position.LONG:\n # buy\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * open_exec_price # quote_market.adjust_quantity(base_qty * open_exec_price) if quote_market else self.adjust_quantity(base_qty * open_exec_price)\n\n # @todo free quantity\n if quote_qty > quote_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought quote asset quantity for %s with %s (have %s)!\" % (quote_asset.symbol, quote_qty, quote_asset.quantity))\n return False\n\n # retain the fee on the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty += commission_amount\n\n # base asset. 
it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, open_exec_price, base_qty, True, self.timestamp)\n # quote asset\n self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, False, self.timestamp)\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + full traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': open_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n elif order.direction == Position.SHORT:\n # sell\n base_qty = order.quantity # market.adjust_quantity(order.quantity)\n quote_qty = base_qty * close_exec_price # quote_market.adjust_quantity(base_qty * close_exec_price) if quote_market else self.adjust_quantity(base_qty * close_exec_price)\n\n # @todo free quantity\n if base_qty > base_asset.quantity:\n self.unlock()\n\n # and then rejected order\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_REJECTED, self.name, (order.symbol, order.ref_order_id))\n\n logger.error(\"Not enought base asset quantity for %s with %s (have %s)!\" % (\n base_asset.symbol, market.format_quantity(base_qty), market.format_quantity(base_asset.quantity)))\n\n return False\n\n # retain the fee from the quote asset\n commission_asset = quote_asset.symbol\n\n if order.is_market():\n commission_amount = quote_qty * market.taker_fee\n else:\n commission_amount = quote_qty * market.maker_fee\n\n quote_qty -= commission_amount\n\n # approximation of the profit/loss according to the average price of the base asset\n delta_price = close_exec_price - base_asset.price\n\n # it will receive its own signal (ignored)\n self.__update_asset(order.order_type, base_asset, market, 0, close_exec_price, base_qty, False, self.timestamp)\n # quote asset\n position_gain_loss_currency = self.__update_asset(order.order_type, quote_asset, quote_market, 0, quote_exec_price, quote_qty, True, 
self.timestamp)\n\n gain_loss_rate = ((close_exec_price - base_asset.price) / base_asset.price) if base_asset.price else 0.0\n position_gain_loss = delta_price * base_qty\n position_gain_loss_currency *= gain_loss_rate\n\n # directly executed quantity\n order.executed = base_qty\n\n # transaction time is current timestamp\n order.transact_time = self.timestamp\n\n result = True\n\n #\n # history\n #\n\n # and keep for history (backtesting reporting)\n history = PaperTraderHistoryEntry(order, self.account.balance, self.account.margin_balance, delta_price/market.one_pip_means,\n gain_loss_rate, position_gain_loss, position_gain_loss_currency)\n self._history.add(history)\n\n # unlock before notify signals\n self.unlock()\n\n #\n # order signal\n #\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'direction': order.direction,\n 'timestamp': order.created_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force\n }\n\n # signal as watcher service (opened + fully traded qty and immediately deleted)\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_OPENED, self.name, (order.symbol, order_data, order.ref_order_id))\n\n order_data = {\n 'id': order.order_id,\n 'symbol': order.symbol,\n 'type': order.order_type,\n 'trade-id': 0,\n 'direction': order.direction,\n 'timestamp': order.transact_time,\n 'quantity': order.quantity,\n 'price': order.price,\n 'stop-price': order.stop_price,\n 'exec-price': close_exec_price,\n 'filled': base_qty,\n 'cumulative-filled': base_qty,\n 'quote-transacted': quote_qty,\n 'stop-loss': order.stop_loss,\n 'take-profit': order.take_profit,\n 'time-in-force': order.time_in_force,\n 'commission-amount': commission_amount,\n 'commission-asset': commission_asset\n }\n\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_TRADED, self.name, (order.symbol, order_data, order.ref_order_id))\n self.service.watcher_service.notify(Signal.SIGNAL_ORDER_DELETED, self.name, (order.symbol, order.order_id, \"\"))\n\n return result", "def soldout():", "def set_orders(self):\n new_buy_orders, new_sell_orders = api.get_orders(self.currency_pair)\n\n # check if the sell book isn't empty\n if new_sell_orders != []:\n log = 'new_sell_orders : ', new_sell_orders # number of new sell orders\n logging.info(log)\n # remove all sell orders under sell_price_min\n if new_sell_orders[0][2] < self.sell_price_min: # order[2] => rate\n for order in new_sell_orders:\n if order[2] < self.sell_price_min:\n resp = api.cancel_order(self.currency_pair, order[0]) # order[0] => order_number\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # remove orders if there too much of them\n # checking if the rate of the last order is too big than the\n # supposed right rate relatively to both the increment and nb_order_to_display variables\n if new_sell_orders[-1][2] > self.sell_price_min + self.increment * self.nb_orders_to_display:\n # if so, defining a variable corresponding to the right rate\n price_target = self.sell_price_min + self.increment * self.nb_orders_to_display\n\n # removing the order if greater than the supposed right price\n for order in new_sell_orders:\n if order[2] > price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # if it 
remain sells orders\n if new_sell_orders != []:\n i = 0\n target = len(new_sell_orders)\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n log = 'new_sell_orders : ', new_sell_orders\n logging.info(log)\n # check if the first item in new_sell_orders is at sell_price_min\n # or add it\n if new_sell_orders[0][2] != self.sell_price_min:\n # api.set_sell_order is not better?\n order = api.set_sell_order(self.currency_pair, self.sell_price_min, self.amount)\n\n new_sell_orders.insert(0, order)\n\n log = 'Sell order added : ', order\n logging.warning(log)\n\n # incrementing target for the while loop? => because the exclusion of the last integer if not?\n target += 1\n # browse sell_orders to add or removes orders\n while i < target:\n # check for overflow\n if new_sell_orders[i][2] + self.increment > self.sell_price_max:\n i = target\n logging.warning('sell_price_max reached')\n\n else:\n # add a sell order if there is no higher sell in sell_orders\n if i + 1 >= len(new_sell_orders): # possible change : less than sign instead of 'greater than'\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # remove sell order if there is less than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n < self.increment:\n\n resp = api.cancel_order(self.currency_pair, new_sell_orders[i + 1][0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n\n target -= 1\n # add sell order if there is more than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n > self.increment:\n\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # increment ok, next round\n else:\n i += 1\n\n self.sell_orders = new_sell_orders[:]\n\n if new_sell_orders == []:\n price_start = self.sell_price_min\n\n logging.warning('no active sell orders')\n\n # set the number of sell orders to execute and check if no more than nb_orders_to_display\n # personal note : recheck the meaning of that condition\n if (self.sell_price_max - self.sell_price_min) / self.increment > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.sell_price_max - self.sell_price_min) / self.increment)\n\n log = i, 'sell order to add from : ', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n sell_orders_executed = api.set_several_sell_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.sell_orders = sell_orders_executed[:]\n\n # When there is orders(s) in new_buy_orders\n if new_buy_orders != []:\n log = 'new_buy_orders : ', new_buy_orders\n logging.info(log)\n # Remove orders with price superior to buy_price_max.\n if new_buy_orders[-1][2] > self.buy_price_max:\n for order in new_buy_orders:\n if order[2] > self.buy_price_max:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # Remove orders with price under our target\n # Why not set 'buy_price_min'? 
for the comparison\n if new_buy_orders[0][2] < self.buy_price_max - self.increment * self.nb_orders_to_display:\n\n price_target = self.buy_price_max - self.increment * self.nb_orders_to_display\n\n for order in new_buy_orders:\n if order[2] < price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # If it remain buy(s) order(s)\n if new_buy_orders != []:\n i = 0\n target = len(new_buy_orders)\n # Add a buy order when the price of the first item in new_buy_orders\n # is not good\n # Why not set 'buy_price_min' for the comparison ?\n if new_buy_orders[0][2] != self.buy_price_max - self.increment \\\n * self.nb_orders_to_display:\n order = api.set_buy_order(self.currency_pair, (self.buy_price_max \\\n - self.increment * self.nb_orders_to_display),\n self.amount)\n\n new_buy_orders.insert(0, order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n target += 1\n # Browse buy_orders to add or remove orders\n while i < target:\n # Add buy orders when there is no higher buy in buy_orders\n if i + 1 >= len(new_buy_orders):\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Remove buy order where there is less than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] < self.increment:\n resp = api.cancel_order(self.currency_pair, new_buy_orders[i + 1][0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n\n target -= 1\n # Add buy order when there is more than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] > self.increment:\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Increment ok, next round.\n else:\n i += 1\n\n self.buy_orders = new_buy_orders[:]\n\n # Add buy orders when new_buy_orders is empty\n if new_buy_orders == []:\n price_start = self.buy_price_max\n logging.warning('No active buy orders')\n # set the number of buy orders to execute and check if no more than\n # nb_orders_to_display\n if (self.buy_price_max - self.buy_price_min) / self.increment \\\n > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.buy_price_max - self.buy_price_min) / self.increment)\n\n # change: simplifying because i is an integer => Decimal(str(i)) should not be needed\n log = i, 'add buy orders from', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n buy_orders_executed = api.set_several_buy_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.buy_orders = buy_orders_executed[:]", "async def sell(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need 
to set your handle using the `+register` command first.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n owned = 0\n market = 0\n for owner, quantity in info:\n if owner == ctx.author.id:\n owned = quantity\n if owner == -1:\n market = quantity\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must sell at least 1 stock.\", 0xFF0000))\n return\n if amount > owned:\n await ctx.channel.send(embed=self.embed(\"You cannot sell more stocks than you own.\", 0xFF0000))\n return\n\n profit = self.stock_value(rating) * amount\n self.db.set_balance(ctx.author.id, money + profit)\n self.db.update_holding(ctx.author.id, stock, owned-amount)\n self.db.update_market(stock, market+amount)\n await ctx.channel.send(embed=self.embed(ctx.author.mention+\", Successfully sold %d stocks of **%s** for $%.2f!\"\n % (amount, stock, profit), 0x00FF00))", "def sell_order_btc(self, asset, price, quantity):\n precision_asset = 8\n precision_unit = 8\n\n symbol_info = self.get_symbol_info(asset)\n baseAsset = symbol_info['baseAsset']\n price_fileter = float(symbol_info['filters'][0]['tickSize'])\n for i in range(1, 9):\n n = price_fileter * (10 ** i)\n if n == 1:\n precision_asset = i\n price_to_sell = \"{:0.0{}f}\".format(price, precision_asset) # price of asset in format\n print(price_to_sell)\n\n quantity_fileter = float(symbol_info['filters'][1]['minQty'])\n for i in range(1, 9):\n n = quantity_fileter * (10 ** i)\n if n == 1:\n precision_unit = i\n quantity_to_sell = \"{:0.0{}f}\".format(quantity, precision_unit) # quantity of asset in format\n print(quantity_to_sell)\n\n current_asset = self.client.get_asset_balance(asset=baseAsset)\n float_asset = float(current_asset['free'])\n if float_asset >= quantity:\n print('Quantity of {} is {} and we have enough to sell {} of {}'.format(baseAsset, float_asset,\n quantity_to_sell, baseAsset))\n if self.test_asset == 0:\n order = self.client.order_limit_sell(\n symbol=asset, #:param symbol: required\n quantity=quantity_to_sell, #:param quantity: required, :type quantity: decimal\n price=price_to_sell, ) #:param price: required,\t:type price: str\n\n print('sell order was placed, price {} and quantity of {} is {} '.format(price_to_sell, asset,\n quantity_to_sell))\n else:\n order = self.client.create_test_order(\n symbol=asset,\n side=self.client.SIDE_SELL,\n type=self.client.ORDER_TYPE_LIMIT,\n timeInForce=self.client.TIME_IN_FORCE_GTC,\n quantity=quantity_to_sell,\n price=price_to_sell)\n\n print('test sell order is done, test_asset = 1')\n else:\n print('Quantity of {} is {} and we have NOT enough to sell {} of {}'.format(baseAsset, float_asset,\n quantity_to_sell, baseAsset))", "async def order_oco_sell(self, **params):\r\n return await self.client_helper(\"order_oco_sell\", **params)", "def execute_order(self, event):\n\n if event.type == 'ORDER':\n # Prepare the parameters for the asset order\n asset = event.symbol\n asset_type = \"STK\"\n order_type = event.order_type\n quantity = event.quantity\n direction = event.direction\n\n # Create the Interactive Brokers contract via the passed Order\n # event\n ib_contract = sefl.create_contract(\n asset, asset_type, self.order_routing, self.order_routing,\n self.currency\n )\n # Create the Interactive Brokers order via the passed Order event\n ib_order = self.create_order(\n order_type, 
quantity, direction\n )\n\n # Use the connection to send the order to IB\n self.tws_conn.placeOrder(\n self.order_id, ib_contract, ib_order\n )\n\n # NOTE: The following line is essential to ensure that orders\n # connect and collect server responses appropriately. In essence a\n # one second delay between filling and returning order details\n # ensures that each order processes optimally. Without this I've\n # witnessed the process crash.\n time.sleep(1)\n\n # Increment the order ID for this ordering session\n self.order_id += 1", "def did_complete_buy_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def notify_purchased(self):\n notify(CheckoutComplete(self.old_cart))", "def handleNewOrder( order, event ):\n \n if event.destination != icore.workflow_states.order.finance.CHARGEABLE:\n return\n\n if not event.source in ( icore.workflow_states.order.finance.REVIEWING, ):\n return \n\n if icore.IShippableOrder.providedBy( order ):\n order.shipments = shipment.OrderShipments()\n \n for item in order.shopping_cart.values():\n if not ( icore.IShippableLineItem.providedBy( item ) \n and icore.IPayableLineItem.providedBy( item ) ):\n continue\n\n payable = item.resolve()\n if payable is None:\n continue\n \n inventory = interfaces.IProductInventory( payable )\n inventory.store_stock -= item.quantity\n \n if inventory.store_stock < 0 and (inventory.store_stock + item.quantity) >= 0:\n notify( interfaces.InventoryBackordered( inventory, payable ) )\n\n notify(\n interfaces.InventoryAvailabilityModified( inventory, payable, order, item.quantity )\n )", "def test_make_order(self):\n df_stock = self.quant.handle_data(self.quant.data[self.symbol], **self.hd_args)\n df_signal = self.quant.create_signal(df_stock, **self.cs_args)\n\n print 'symbol:', self.symbol\n for expire in (False, True):\n print 'expire set:', expire\n\n df_order = self.strategy.make_order(df_stock, df_signal, expire=expire, **self.args)\n df_order['diff'] = df_order['stock0'] - df_order['strike']\n\n print df_order.to_string(line_width=300)\n\n pct_chg = df_order['pct_chg']\n pct_chg = pct_chg[pct_chg < 10]\n print pct_chg.sum(), np.round(pct_chg.mean(), 2),\n print np.round(float(pct_chg[pct_chg > 0].count() / float(pct_chg.count())), 2),\n print np.round(float(pct_chg[pct_chg < 0].count() / float(pct_chg.count())), 2)\n\n print '-' * 100 + '\\n'", "def update_order(self, event):\n if event.type == 'ORDER':\n # event.print_order()\n cur_quantity = self.current_positions[event.symbol]\n if event.direction == 'EXIT':\n assert(cur_quantity != 0)\n assert(event.quantity == 0)\n if cur_quantity > 0:\n event.direction = \"SELL\"\n else:\n event.direction = \"BUY\"\n event.quantity = abs(cur_quantity)\n elif event.direction == \"ALLBUY\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"BUY\"\n elif event.direction == \"ALLSELL\":\n current_price = self.bars.get_latest_bar_value(event.symbol, 'close')\n capital = self.current_holdings['cash']\n assert(cur_quantity == 0)\n assert(event.quantity == 0)\n event.quantity = int(capital / current_price)\n event.direction = \"SELL\"\n\n self.update_positions_from_order(event)\n self.update_holdings_from_order(event)", "def _trade(self, price, quant, obj, nextpos):\r\n\r\n ## trace('__________________ _trade', price, quant, obj, booking)\r\n\r\n # A list of 
(trade-object, quantity) booked.\r\n booked = []\r\n\r\n # Total size booked during this trade.\r\n total_booked = 0\r\n\r\n # \"Real\" PnL for the booked trades.\r\n real_pnl = 0\r\n\r\n # Book the new trade against existing positions if the trade is not on\r\n # the same side as our current position.\r\n position = self.position()\r\n if quant * position < 0:\r\n\r\n # Process all the positions.\r\n done = 0\r\n while self.positions:\r\n pos = nextpos(self)\r\n if abs(quant) >= abs(pos.size):\r\n # This position is entirely consumed by the trade.\r\n booked_quant = pos.size\r\n self.positions.remove(pos) # This may become slow.\r\n else:\r\n # This position is only partially consumed by the trade.\r\n booked_quant = -quant\r\n pos.size += quant\r\n done = 1\r\n\r\n quant += booked_quant\r\n total_booked += booked_quant\r\n booked.append( (pos.obj, booked_quant) )\r\n\r\n real_pnl += booked_quant * (price - pos.price)\r\n if done or quant == 0:\r\n break\r\n\r\n assert quant * self.position() >= 0\r\n\r\n # Price the booked trades into the realized PnL, depending on method.\r\n if self.pricing_method is PRICING_REAL:\r\n realized_pnl = real_pnl\r\n else:\r\n if position == 0:\r\n assert total_booked == 0, total_booked\r\n realized_pnl = 0\r\n else:\r\n realized_cost = self.L((total_booked*self.F(self.cost4avg))/position)\r\n realized_pnl = total_booked * price - realized_cost\r\n self.cost4avg -= realized_cost\r\n self._realized_pnl += realized_pnl\r\n if total_booked == 0:\r\n assert realized_pnl == 0, realized_pnl\r\n else:\r\n booked.append( (obj, -total_booked) )\r\n \r\n # Append the remainder of our trade to the inventory if not all was\r\n # booked.\r\n if quant != 0:\r\n newpos = Position(self, price, quant, obj)\r\n self.positions.append(newpos)\r\n if self.pricing_method is PRICING_AVERAGE:\r\n self.cost4avg += newpos.cost()\r\n\r\n self.last_trade_price = price\r\n return booked, realized_pnl", "def on_end_buying_round(self, player_state, round_history):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a view and its associated object lookup and validator to the server's registry.
def add_view_to_registry(self, view, target_lookup, target_validator): self._view_registry[view] = (target_lookup, target_validator)
[ "def register_view(self, view):\n if isinstance(view, type):\n # Instantiate the view, if needed\n view = view()\n\n class_name = view.__class__.__name__\n if not hasattr(view, \"url\"):\n raise AttributeError(f'{class_name} missing required attribute \"url\"')\n\n if not hasattr(view, \"name\"):\n raise AttributeError(f'{class_name} missing required attribute \"name\"')\n\n if not hasattr(view, \"description\"):\n raise AttributeError(f'{class_name} missing required attribute \"description\"')\n\n view.register(self.app.router)\n _LOGGER.debug(\"View '{}' has been registered.\".format(view.name))", "def register_view(self):\n if 'views' in self.config:\n View.register(self.config['views'])", "def _add_view_object(self, view: VIEW) -> None:\n key = view.iview\n assert key > 0, 'key=%s; view=%s\\n' % (key, view)\n if key in self.views:\n if not view == self.views[key]:\n assert key not in self.views, 'VIEW.iview=%s\\nold=\\n%snew=\\n%s' % (\n key, self.views[key], view)\n else:\n assert key > 0, 'iview=%s view=\\n%s' % (key, view)\n self.views[key] = view\n self._type_to_id_map[view.type].append(key)", "def register_view(self, view, target_lookup, target_validator):\r\n def wrapper(request, *args, **kwargs):\r\n response = view(request, *args, **kwargs)\r\n absolute_uri = self.get_absolute_uri(request)\r\n if absolute_uri:\r\n try:\r\n target_uri = request.build_absolute_uri()\r\n target_object = self.get_target_object(target_uri)\r\n self.validate_target(target_uri, target_object)\r\n response['X-Pingback'] = absolute_uri\r\n except BacklinkServerError:\r\n pass\r\n return response\r\n wrapper = update_wrapper(wrapper, view)\r\n self.add_view_to_registry(wrapper, target_lookup, target_validator)\r\n return wrapper", "def add_view(self, view):\n if view not in self.data.values():\n if view.model not in self.models:\n self.models.append(view.model)\n self.data[view.slug] = view", "def add_view(self,view):\n self._views.append(view)\n self.model.add_property_change_listener(view)", "def registerViewForModel(view, model):\n components.registerAdapter(view, model, interfaces.IView)", "def register(Model, View, name=None):\n if name is None:\n name = Model._meta.verbose_name_plural.lower().replace(' ','')\n RESTAPI.view_by_model[Model] = View\n RESTAPI.name_by_model[Model] = name\n RESTAPI.urls.append(url(r'^%s' % (name,), include(View.urls(),\n namespace='api_%s' % (name,))))", "def _add_view(bp, view_class, view_route, view_loader, methods=[\"POST\"]):\n endpoints = app.config.get(\"RECORDS_REST_ENDPOINTS\", [])\n options = endpoints.get(DOCUMENT_REQUEST_PID_TYPE, {})\n default_media_type = options.get(\"default_media_type\", \"\")\n rec_serializers = options.get(\"record_serializers\", {})\n serializers = {\n mime: obj_or_import_string(func) for mime, func in rec_serializers.items()\n }\n\n blueprint.add_url_rule(\n \"{0}/{1}\".format(options[\"item_route\"], view_route),\n view_func=view_class.as_view(\n view_class.view_name.format(DOCUMENT_REQUEST_PID_TYPE),\n serializers=serializers,\n default_media_type=default_media_type,\n ctx=dict(loader=view_loader),\n ),\n methods=methods,\n )", "def view(self, view):\n self.view.append(view)", "def addView(self, filename='main', path='.'):\n if not path.endswith('/'): path += '/'\n if path != './' or fileExists(getResourcesPath(path)) == False:\n self.addBrowser(path)\n addAndRegisterView(filename, path)", "def add_view(self, pattern, view, **vars):\n self.routes.append((re.compile('^%s$' % pattern), view, vars))", "def add_view(self, view):\n 
logging.debug(\"Analyzing {0}: {1}\".format(view['id'].encode('utf8'), view['name'].encode('utf8')))\n\n view_record = self._get_view_record(view)\n self._store_view_to_db(view_record)\n \n if 'columns' not in view.keys():\n logging.warn(\"No columns in {0}\".format(view['id'].encode('utf8')))\n raise KeyError(\"No Columns\")\n\n for col in view['columns']:\n col_record = self._get_col_record(col)\n self._store_col_to_db(col_record, view['id'])\n self._store_unnormalized(view_record, col_record)", "def add_view(self, selector, view_type, model):\n\n els = ElQuery(selector, self.request.document)\n if not els.length:\n raise ValueError(\"cannot find el with selector '%s' on document\" % selector)\n q_el = els\n\n #clone to replace later when removed\n #pyquery buggy on cloned though, so after cloning, find q_el again\n cloned_el = q_el.clone()\n if is_server:\n q_el = ElQuery(selector, self.request.document)\n\n view = view_type(self, q_el[0], model)\n\n view.render()\n\n self.views.append((selector, view, cloned_el))", "def register(self, router):\n _LOGGER.debug(\"Attempting to register our view\")\n\n for method in (\"get\", \"post\", \"delete\", \"put\", \"patch\", \"head\", \"options\"):\n handler = getattr(self, method, None)\n\n if not handler:\n _LOGGER.debug(\"Couldn't locate a '{}' handler for the view.\".format(method))\n continue\n\n router.add_route(method, self.url, handler, name=self.name)", "def addViews(self, document, views):\n document.setdefault(\"views\", {})\n for name, data in views.items():\n document[\"views\"][name] = data", "def add(self, view: BaseView, ttl: Optional[Union[int, float]] = None) -> None:\n identifier = getattr(view, id_field(view).field_name)\n key = f\"{underscore(view.__class__.__name__)}:::{identifier}\"\n\n ttl = ttl or self.conn_info.get(\"TTL\") or 300\n\n self.r.psetex(key, int(ttl * 1000), json.dumps(view.to_dict()))", "def register(self, kind, handler):\n\t\tif __debug__: # In production this logging is completely skipped, regardless of logging level.\n\t\t\tif py3 and not pypy: # Where possible, we shorten things to just the cannonical name.\n\t\t\t\tlog.debug(\"Registering view handler.\", extra=dict(type=name(kind), handler=name(handler)))\n\t\t\telse: # Canonical name lookup is not entirely reliable on some combinations.\n\t\t\t\tlog.debug(\"Registering view handler.\", extra=dict(type=repr(kind), handler=repr(handler)))\n\t\t\n\t\t# Add the handler to the pool of candidates. This adds to a list instead of replacing the \"dictionary item\".\n\t\tself._map.add(kind, handler)\n\t\t\n\t\treturn handler", "def registerViewType( self, cls, window = None ):\n if ( not cls in self._viewTypes ):\n self._viewTypes.append(cls)\n \n if ( window ):\n cls.registerToWindow(window)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a view and its object lookup and validator, wrapping the view to provide autodiscovery headers when appropriate.
def register_view(self, view, target_lookup, target_validator): def wrapper(request, *args, **kwargs): response = view(request, *args, **kwargs) absolute_uri = self.get_absolute_uri(request) if absolute_uri: try: target_uri = request.build_absolute_uri() target_object = self.get_target_object(target_uri) self.validate_target(target_uri, target_object) response['X-Pingback'] = absolute_uri except BacklinkServerError: pass return response wrapper = update_wrapper(wrapper, view) self.add_view_to_registry(wrapper, target_lookup, target_validator) return wrapper
[ "def register_view(self, view):\n if isinstance(view, type):\n # Instantiate the view, if needed\n view = view()\n\n class_name = view.__class__.__name__\n if not hasattr(view, \"url\"):\n raise AttributeError(f'{class_name} missing required attribute \"url\"')\n\n if not hasattr(view, \"name\"):\n raise AttributeError(f'{class_name} missing required attribute \"name\"')\n\n if not hasattr(view, \"description\"):\n raise AttributeError(f'{class_name} missing required attribute \"description\"')\n\n view.register(self.app.router)\n _LOGGER.debug(\"View '{}' has been registered.\".format(view.name))", "def add_view_to_registry(self, view, target_lookup, target_validator):\r\n self._view_registry[view] = (target_lookup, target_validator)", "def register_view(self):\n if 'views' in self.config:\n View.register(self.config['views'])", "def registerViewForModel(view, model):\n components.registerAdapter(view, model, interfaces.IView)", "def register(Model, View, name=None):\n if name is None:\n name = Model._meta.verbose_name_plural.lower().replace(' ','')\n RESTAPI.view_by_model[Model] = View\n RESTAPI.name_by_model[Model] = name\n RESTAPI.urls.append(url(r'^%s' % (name,), include(View.urls(),\n namespace='api_%s' % (name,))))", "def _add_view(bp, view_class, view_route, view_loader, methods=[\"POST\"]):\n endpoints = app.config.get(\"RECORDS_REST_ENDPOINTS\", [])\n options = endpoints.get(DOCUMENT_REQUEST_PID_TYPE, {})\n default_media_type = options.get(\"default_media_type\", \"\")\n rec_serializers = options.get(\"record_serializers\", {})\n serializers = {\n mime: obj_or_import_string(func) for mime, func in rec_serializers.items()\n }\n\n blueprint.add_url_rule(\n \"{0}/{1}\".format(options[\"item_route\"], view_route),\n view_func=view_class.as_view(\n view_class.view_name.format(DOCUMENT_REQUEST_PID_TYPE),\n serializers=serializers,\n default_media_type=default_media_type,\n ctx=dict(loader=view_loader),\n ),\n methods=methods,\n )", "def register(\n view: Optional[Type[\"AdminView\"]] = None,\n *,\n admin_site: Optional[AdminSite] = None,\n admin_class: Type[ModelAdmin] = ModelAdminView,\n) -> Union[Type[\"AdminView\"], Callable[[Type[\"AdminView\"]], Type[\"AdminView\"]]]:\n if not admin_site:\n admin_site = site\n\n def wrapped(inner_view: Type[\"AdminView\"]) -> Type[\"AdminView\"]:\n module = inner_view.__module__\n match = re.search(r\"\\.?(\\w+)\\.admin\", module)\n assert match is not None\n app_label = match.group(1)\n app_config = apps.get_app_config(app_label)\n\n label = getattr(inner_view, \"label\", None)\n if not label:\n label = re.sub(\"(Admin)|(View)\", \"\", inner_view.__name__).lower()\n inner_view.label = label\n\n model_name = label.capitalize()\n verbose_name = getattr(inner_view, \"verbose_name\", model_name)\n inner_view.verbose_name = verbose_name\n\n access_perm_codename = \"can_access_\" + model_name.lower()\n access_perm_name = _(\"Can access {verbose_name}\").format(\n verbose_name=verbose_name\n )\n # The first permission here is expected to be\n # the general access permission.\n permissions = tuple(\n [(access_perm_codename, access_perm_name)]\n + list(getattr(inner_view, \"permissions\", []))\n )\n\n model = type(\n model_name,\n (Model,),\n {\n \"__module__\": module + \".__models__\", # Fake\n \"View\": inner_view,\n \"app_config\": app_config,\n \"Meta\": type(\n \"Meta\",\n (object,),\n {\n \"managed\": False,\n \"abstract\": True,\n \"app_label\": app_config.label,\n \"verbose_name\": verbose_name,\n \"verbose_name_plural\": verbose_name,\n 
\"permissions\": permissions,\n },\n ),\n },\n )\n\n assert admin_site is not None\n admin_site._registry[model] = admin_class(model, admin_site)\n return inner_view\n\n if view is None: # Used as a decorator\n return wrapped\n\n return wrapped(view)", "def register(self, kind, handler):\n\t\tif __debug__: # In production this logging is completely skipped, regardless of logging level.\n\t\t\tif py3 and not pypy: # Where possible, we shorten things to just the cannonical name.\n\t\t\t\tlog.debug(\"Registering view handler.\", extra=dict(type=name(kind), handler=name(handler)))\n\t\t\telse: # Canonical name lookup is not entirely reliable on some combinations.\n\t\t\t\tlog.debug(\"Registering view handler.\", extra=dict(type=repr(kind), handler=repr(handler)))\n\t\t\n\t\t# Add the handler to the pool of candidates. This adds to a list instead of replacing the \"dictionary item\".\n\t\tself._map.add(kind, handler)\n\t\t\n\t\treturn handler", "def _add_view_object(self, view: VIEW) -> None:\n key = view.iview\n assert key > 0, 'key=%s; view=%s\\n' % (key, view)\n if key in self.views:\n if not view == self.views[key]:\n assert key not in self.views, 'VIEW.iview=%s\\nold=\\n%snew=\\n%s' % (\n key, self.views[key], view)\n else:\n assert key > 0, 'iview=%s view=\\n%s' % (key, view)\n self.views[key] = view\n self._type_to_id_map[view.type].append(key)", "def testRegisterViewWithAlreadyRegisteredView(self):\r\n self.views.register_view(self.mockView)\r\n self.assertIn(self.mockView, self.views.views)\r\n self.assertRaises(ViewsException, self.views.register_view, self.mockView)", "def register(self, router):\n _LOGGER.debug(\"Attempting to register our view\")\n\n for method in (\"get\", \"post\", \"delete\", \"put\", \"patch\", \"head\", \"options\"):\n handler = getattr(self, method, None)\n\n if not handler:\n _LOGGER.debug(\"Couldn't locate a '{}' handler for the view.\".format(method))\n continue\n\n router.add_route(method, self.url, handler, name=self.name)", "def add_view(self,view):\n self._views.append(view)\n self.model.add_property_change_listener(view)", "def registerViewType( self, cls, window = None ):\n if ( not cls in self._viewTypes ):\n self._viewTypes.append(cls)\n \n if ( window ):\n cls.registerToWindow(window)", "def add_view(self, view):\n if view not in self.data.values():\n if view.model not in self.models:\n self.models.append(view.model)\n self.data[view.slug] = view", "def view(self, view):\n self.view.append(view)", "def wrapped_view(view, **kwargs):\n\n @functools.wraps(view)\n def django_view(request, **dj_kwargs):\n kwargs['request'] = request\n return wrap_to_request(view(**dj_kwargs), **kwargs)\n\n return django_view", "def test_helper_view_registration(self):\n view = api.content.get_view(\n name='barra_helper',\n context=self.portal,\n request=self.portal.REQUEST,\n )\n view = view.__of__(self.portal)\n self.failUnless(view)", "def testRegisterViewWithUnregisteredView(self):\r\n self.assertNotIn(self.mockView, self.views.views)\r\n self.views.register_view(self.mockView)\r\n self.assertIn(self.mockView, self.views.views)", "def register(self, func):\n self.viewlet_func = func\n self.viewlet_func_args = getargspec(func).args\n\n if not self.name:\n self.name = getattr(func, 'func_name', getattr(func, '__name__'))\n\n func_argcount = len(self.viewlet_func_args) - 1\n if self.timeout:\n # TODO: HASH KEY\n self.key = u'viewlet:%s(%s)' % (self.name, ','.join(['%s' for _ in range(0, func_argcount)]))\n self.key_mod = func_argcount > 0\n self.library.add(self)\n\n 
def call_with_refresh(*args, **kwargs):\n return self.call(*args, **kwargs)\n setattr(call_with_refresh, 'refresh', self.refresh)\n\n return call_with_refresh" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up a target object from an absolute URI.
def get_target_object(self, target_uri, *args, **kwargs): view, args, kwargs = self.lookup_view(target_uri) try: target_lookup, target_validator = self._view_registry[view] except KeyError: raise BacklinkTargetNotPingable try: return target_lookup(*args, **kwargs) except ObjectDoesNotExist: raise BacklinkTargetDoesNotExist
[ "def find_target(request, pk):\n target = Target.objects.get(pk=pk)\n\n # We only let users get their own targets, unless a superuser.\n if target.user == request.user or request.user.is_superuser:\n return target\n else:\n raise ValueError(\"Accessing target %d not allowed\" % pk)", "def get_via_uri(self, uri, request=None):\n prefix = get_script_prefix()\n \n chomped_uri = uri\n\n if prefix and chomped_uri.startswith(prefix):\n chomped_uri = chomped_uri[len(prefix)-1:]\n\n try:\n view, args, kwargs = resolve(chomped_uri)\n except Resolver404:\n raise NotFound(\"The URL provided '%s' was not a link to a valid resource.\" % uri)\n\n return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))", "def lookup(self, name_or_url):\n result = self.get(name_or_url)\n\n if result is None:\n result = Mirror(fetch_url=name_or_url)\n\n return result", "def get_object_from_pk_or_uri(request, pk):\n try:\n instance = TempEntityClass.objects_inheritance.get_subclass(pk=pk)\n return instance\n except TempEntityClass.DoesNotExist:\n domain = BASE_URI\n new_uri = f\"{domain}entity/{pk}/\"\n uri2 = Uri.objects.filter(uri=new_uri)\n if uri2.count() == 1:\n instance = TempEntityClass.objects_inheritance.get_subclass(\n pk=uri2[0].entity_id\n )\n elif uri2.count() == 0:\n temp_obj = get_object_or_404(Uri, uri=new_uri[:-1])\n instance = TempEntityClass.objects_inheritance.get_subclass(\n pk=temp_obj.entity_id\n )\n else:\n raise Http404\n return instance", "def url(self, ns, target):\n ns, url, title = self[ns]\n maxargnum = max([0] + [int(a[1:]) for a in\n re.findall(InterWikiMap._argspec_re, url)])\n target, query, fragment = split_url_into_path_query_fragment(target)\n if maxargnum > 0:\n args = target.split(':', (maxargnum - 1))\n else:\n args = [target]\n url = self._expand_or_append(url, args)\n ntarget, nquery, nfragment = split_url_into_path_query_fragment(url)\n if query and nquery:\n nquery = '%s&%s' % (nquery, query[1:])\n else:\n nquery = nquery or query\n nfragment = fragment or nfragment # user provided takes precedence\n expanded_url = ntarget + nquery + nfragment\n expanded_title = self._expand(title, args)\n if expanded_title == title:\n expanded_title = _(\"%(target)s in %(name)s\",\n target=target, name=title)\n return expanded_url, expanded_title", "def parse(cls, uri):\n match = _URI_FORMAT.search(uri)\n return cls(match.group(1), match.group(2), match.group(3), match.group(4), match.group(5))", "def _get(self, subpath: str, target_class: type = None, target_module: Union[Module, Symbolic_self] = 'self', target_object = None) -> 'target_class':\n # Convert the symbolic 'self' into the actual self\n if target_module == 'self':\n target_module = self\n target_module = cast(Module, target_module)\n\n return self._request('get', subpath, None, target_class, target_module, target_object)", "def reassociate(self, target, source=None, absolute_path=True):\n # First let's convert to abs path if necessary\n if absolute_path:\n if os.path.exists(target):\n target = os.path.abspath(target)\n if source is not None and os.path.exists(source):\n source = os.path.abspath(source)\n\n # Now, did we pass a source for uri to replace?\n if source is None:\n source = compute_fast_sha(target)\n\n # Ok now let's get all associated uri that match\n # Fist assuming it's a fast_sha\n matches = list(self.find(fast_sha=source, ids_only=True))\n # Now it could be simply a uri\n matches += list(self.find(uri=source, ids_only=True))\n\n # And it's quite possible it's a long_sha too\n matches += 
list(self.find(long_sha=source, ids_only=True))\n\n # And now let's do the work\n for match_id in matches:\n match = self.__store__._load(match_id)\n match.uri = target", "def resolve(self, base_uri: URI) -> URI:\n uri = object.__new__(URI)\n uri._uriref = self._uriref.resolve_with(base_uri._uriref)\n return uri", "def get_entity(self, uri: str) -> OntologyClass:\n return self.entities.get(str(uri), None)", "def get_uri(self, request):", "def parse_uri(self, uri):\n return self.parse(HTTPCache(uri).content())", "def query(self, uri):\n for handler in self.handlers:\n if handler.detect(uri):\n return handler\n raise UnsupportedURI(\"No registered URI handler supports %r\" % uri)", "def get_by_uri(cls, uri):\n results = cls._get_all_by_uris([uri])\n return results[0] if len(results) > 0 else None", "def get_target(target_type):\n try:\n app_label = APP_LABEL_MAPPING[target_type]\n\n return ContentType.objects.get(app_label=app_label, model=target_type)\n except (KeyError, ContentType.DoesNotExist) as exc:\n raise TargetDoesNotExist() from exc", "def is_referenced(self, uri):", "def get_target_from_inst(inst_target_name, RDK):\n r = RE_TARGET.search(inst_target_name)\n if not r:\n return None\n\n target_name = r.group(1)\n target = RDK.Item(target_name, robolink.ITEM_TYPE_TARGET)\n if not target.Valid(True) or target.Name() != target_name:\n return None\n\n return target", "def target(self):\n\n key = self.__entity.get(\"target_key\")\n return datastore.Get(key)", "def _get_target(self, idx):\n if self.in_memory:\n return self.targets[idx]\n\n image_id = get_image_id(self.image_filenames[idx])\n target = self._read_target(image_id)\n return target" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a target object.
def validate_target(self, target_uri, target_object): view, args, kwargs = self.lookup_view(target_uri) try: target_lookup, target_validator = self._view_registry[view] if not target_validator(target_uri, target_object): raise BacklinkTargetNotPingable except KeyError: raise BacklinkTargetNotPingable
[ "def validate_object(self, objectString, target, isStatus = False):\n # example 5 > 2\n targetObject = target \n\n if objectString.isdigit():\n return True\n \n #list of valid terms other than digit i.e d20\n if re.search(self._regexForDice, objectString) is not None:\n return True\n\n #special case where the target is the point\n if target == \"point\":\n targetObject = self._entityTarget\n \n if isStatus != False:\n return self.is_valid_status(target, objectString)\n\n # entity.hp > 4\n if objectString.find(targetObject) != self._cannotFindSubstring and objectString.find(\".\") != self._cannotFindSubstring:\n dotSplitter = objectString.find(\".\")\n ourObject = objectString[:dotSplitter]\n objectAttribute = objectString[dotSplitter + 1:]\n if ourObject == \"target\" or ourObject == \"entity\" or ourObject == \"self\":\n return self.validate_generic_object_attribute(ourObject.strip(), objectAttribute.strip())\n else:\n return self.validate_object_attribute_or_action(ourObject.strip(), objectAttribute.strip())\n \n if objectString in self._variables_list:\n return True\n\n if objectString == \"target\" or objectString == \"self\" or objectString == \"entity\":\n return True\n\n return False", "def validate(self, *args):\n pass", "def _validate_model(self, session, obj):\n pass", "def validate(self, model_output_shape: Tuple, target_shape: Tuple) -> None:\n raise NotImplementedError", "def test_validate_invalid_object(self):\n G.VALIDATOR.validate(Job('test_invalid_object.json'))", "def test_validate_bad_data(self):\n mocked_target_object = mommy.make(\"tasking.Task\")\n\n bad_target_id = OrderedDict(\n name=\"Cow price\",\n description=\"Some description\",\n start=timezone.now(),\n total_submission_target=10,\n timing_rule=\"RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5\",\n target_content_type=self.task_type.id,\n target_id=1337,\n )\n\n self.assertFalse(TaskSerializer(data=bad_target_id).is_valid())\n\n bad_content_type = OrderedDict(\n name=\"Cow price\",\n description=\"Some description\",\n start=timezone.now(),\n total_submission_target=10,\n timing_rule=\"RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5\",\n target_content_type=\"foobar\",\n target_id=mocked_target_object.id,\n )\n\n self.assertFalse(TaskSerializer(data=bad_content_type).is_valid())\n\n bad_start_date = OrderedDict(\n name=\"Cow Price\",\n description=\"Some Description\",\n start=timezone.now(),\n end=timezone.now() - timedelta(1),\n target_content_type=self.task_type.id,\n target_id=mocked_target_object.id,\n )\n\n self.assertFalse(TaskSerializer(data=bad_start_date).is_valid())", "def validate_data(\n cls, data: Union[dict, list], context: str = None, on_error: callable = None\n ):\n try:\n cls.parse_obj(data, context)\n return True\n except ValidationError as e:\n if on_error:\n for error in e.errors():\n on_error(error)\n\n return False", "def test_target_check(self):\n # failing to provide a target raises an attribute error\n with self.assertRaises(AttributeError):\n class Cuirass(item.Equippable):\n \"\"\"whoops I forgot the target\"\"\"\n\n # providing a target with the wrong type raises an error\n with self.assertRaises(TypeError):\n class Greaves(item.Equippable):\n target = \"whoops not proper target\"\n\n # inheriting from a class with a valid EquipTarget is sufficient\n class Headwear(item.Equippable):\n target = inv.EquipTarget(\"Head\")\n\n class Hat(Headwear):\n pass\n\n # inheriting from a class and overriding the target with a\n # value of the wrong type will cause an error\n with self.assertRaises(TypeError):\n 
class Helmet(item.Equippable):\n target = \"whoops not a proper target\"", "def _verifyObjectPaste(self, object, validate_src=1):\n pathres = getattr(object, \"relationshipManagerPathRestriction\", None)\n if pathres and \"/\".join(self.getPhysicalPath()).find(pathres) == -1:\n raise CopyError(\n MessageDialog(\n title=\"Not Supported\",\n message=\"The object <EM>%s</EM> can not be pasted into\"\n \" the path <EM>%s</EM>\"\n % (object.id, \"/\".join(self.getPhysicalPath())),\n action=\"manage_main\",\n )\n )\n # We don't need this it checks for meta_type permissions\n # the check messes up zenhubs ability to rename devices\n # CopyContainer._verifyObjectPaste(self,object,validate_src)", "def valid_object(obj):\n return cmds.objExists(obj)", "def test_invalid_object():\n with pytest.raises(ValidationError) as error:\n Yara(\n name='Zeus C2',\n labels=['malicious-activity'],\n description='This is how C2 URLs for Zeus usually end.',\n pattern=VALID_RULE,\n # valid_from='2016-01-01T00:00:00Z',\n # valid_until='2017-01-01T00:00:00Z'\n )\n assert 'No values for required properties' in str(error.value)\n assert 'valid_from' in str(error.value)", "def validateTarget(self):\n\n self.PRODUCTS = self.getConfigValue('general', 'products').split()\n self.DEPLOYMENTS = self.getConfigValue('general', 'deployments').split()\n self.S3_BUCKET = self.getConfigValue('general', 's3_bucket')\n\n if searchList(self.oCmdOptions.sProduct, self.PRODUCTS) is False:\n errorMsg(\"invalid product: %s, valid products are: %s\" %\n (self.oCmdOptions.sProduct, \", \".join(self.PRODUCTS)))\n\n if searchList(self.oCmdOptions.sDeployment, self.DEPLOYMENTS) is False:\n errorMsg(\"invalid deployment: %s, valid deployments are: %s\" %\n (self.oCmdOptions.sDeployment, \", \".join(self.DEPLOYMENTS)))\n\n self.CF_DIST_ID = self.getConfigValue('cloudfront-' + self.oCmdOptions.sProduct,\n self.oCmdOptions.sDeployment + '-dist-id')\n\n # Connect to S3 with the configured credentials and validate\n sId = os.environ.get('AWS_S3_DEPLOY_ACCESS_ID') or self.getConfigValue('aws-credentials', 'access_id')\n sKey = os.environ.get('AWS_S3_DEPLOY_SECRET_KEY') or self.getConfigValue('aws-credentials', 'secret_key') \n self.oBoto = boto3.client('s3', aws_access_key_id=sId, aws_secret_access_key=sKey)\n try:\n statusMsg(\"Validating AWS credentials\")\n self.oBoto.list_objects_v2(Bucket=self.S3_BUCKET, MaxKeys=1)\n except ClientError as e:\n awsError(e)\n self.oBotoCF = boto3.client('cloudfront', aws_access_key_id=sId, aws_secret_access_key=sKey)", "def _validate(self):\n self._validate_data_type()\n self._validate_characters()\n self._validate_regexp()\n logger.debug(\"Instance attributes passed validation.\")", "def validateTargetRule(self,targetRule):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/targetrules/validate\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\tself.client.withResourceUrl(url).withBody(targetRule).execute();", "def _validate(cls):\n\n # Capture any validation issues for use later when raising\n invalid_msg = \"\"\n # Define which \"input\" arg properties we want to check\n input_properties = {\"fixable\": cls.fixable,\n \"fixed\": cls.fixed,\n \"limits\": cls.limits,\n \"opts\": cls.opts,\n \"units\": cls.units,\n \"visible\": cls.visible}\n # Check if all the attributes have a default value for each input param\n for arg in cls.input_names:\n for name, prop in input_properties.items():\n if prop and arg not in prop:\n pass # Do we want to enforce input default values?\n\n # Check if there is a 1:1 mapping from 
user-specified input_names to function args\n num_names = len(cls.input_names)\n num_args = len(inspect.signature(cls._func).parameters.keys())\n if num_names != num_args:\n invalid_msg += (f\"Number of input_names given ({num_names}) \"\n f\"must match number of inputs for the operation ({num_args}).\")\n # Check if there are any input args that are not actually defined in the operation\n # e.g. 'x' is not a valid input in the case below:\n # @visible('x')\n # def func(a): return\n for name, prop in input_properties.items():\n for arg in prop.keys():\n if arg not in cls.input_names:\n invalid_msg += f\"\\\"{arg}\\\" is not a valid input for \\\"{name}\\\". \"\n\n # Warn if there are no output_names defined\n if not len(cls.output_names):\n warning_msg = (f\"No output_names have been specified for your operation {cls}; \"\n f\"you will not be able to connect your operation's output(s) to \"\n f\"any other operations.\")\n msg.logMessage(warning_msg, level=msg.WARNING)\n\n # Define which \"output\" arg properties we want to check\n output_properties = {\"output_shape\": cls.output_shape}\n # Check if there are any output args that are not actually defined in the operation\n for name, prop in output_properties.items():\n for arg in prop.keys():\n if arg not in cls.output_names:\n invalid_msg += f\"\\\"{arg}\\\" is not a valid output for \\\"{name}\\\". \"\n\n if invalid_msg:\n raise ValidationError(cls, invalid_msg)\n else:\n msg.logMessage(f\"All args for {cls} are valid.\")", "def validate(self):\n if self.skip_validation:\n return\n\n # make sure there are no unrecognized properties\n for property_name in self.properties:\n if property_name not in type(self).possible_properties:\n self.die(\"has unknown property '{0}'\".format(\n property_name\n ))\n\n # make sure that all required properties are present\n for property_name in type(self).required_properties:\n if property_name not in self.properties:\n self.die(\"must have property '{0}'\".format(\n property_name\n ))", "def _validate_passed_object_or_command_parameters(self):\n if self._validated_embedded_parameters:\n return # Validate parameters only once\n if self.cmd_object and self.cmd_class_name:\n # _validate_start is called before running command on connection, so we raise exception instead\n # of setting it\n raise CommandFailure(\n self,\n \"Both 'cmd_object' and 'cmd_class_name' parameters were provided. Please specify only one.\"\n )\n if self.cmd_object and self.cmd_object.done():\n # _validate_start is called before running command on connection, so we raise exception\n # instead of setting it\n raise CommandFailure(\n self,\n \"Not allowed to run again the embedded command (embedded command is done): {}.\".format(\n self.cmd_object))\n if not self.cmd_object:\n self._finish_on_final_prompt = True\n self._validated_embedded_parameters = True", "def is_target_valid(self, targetID):\n cur = self.conn.cursor()\n cur.execute('SELECT * FROM targets WHERE id=? 
limit 1', [targetID])\n results = cur.fetchall()\n cur.close()\n return len(results) > 0", "def validate_target(target):\n # target_id\n if isinstance(target, Role):\n target_id = target.id\n target_type = PermissionOverwriteTargetType.role\n \n elif isinstance(target, ClientUserBase):\n target_id = target.id\n target_type = PermissionOverwriteTargetType.user\n \n else:\n target_id = maybe_snowflake(target)\n if (target_id is None):\n raise TypeError(\n f'`target` can be `int`, `{Role.__name__}`, `{ClientUserBase.__name__}`, got '\n f'{target.__class__.__name__}; {target!r}.'\n )\n \n target_type = PermissionOverwriteTargetType.unknown\n \n return target_id, target_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform XMLRPC (de)serialization of the request and the called ping method.
def xmlrpc_dispatch(self, request): try: params, method = xmlrpclib.loads(request.raw_post_data) if method != 'pingback.ping': raise Exception('Method "%s" not supported' % method) source_uri, target_uri = params response = self.register_ping(source_uri, target_uri) response = (response,) response = xmlrpclib.dumps(response, methodresponse=1, allow_none=0, encoding='utf-8') except xmlrpclib.Fault, fault: response = xmlrpclib.dumps(fault, allow_none=0, encoding='utf-8') except: import sys exc_type, exc_value, exc_tb = sys.exc_info() response = xmlrpclib.dumps( xmlrpclib.Fault(1, '%s:%s' % (exc_type, exc_value)), encoding='utf-8', allow_none=0, ) return response
[ "def do_POST(self):\n\t\ttry:\n\t\t\t# get arguments\n\t\t\tdata = self.rfile.read(int(self.headers[\"content-length\"]))\n\t\t\t# In previous versions of SimpleXMLRPCServer, _dispatch\n\t\t\t# could be overridden in this class, instead of in\n\t\t\t# SimpleXMLRPCDispatcher. To maintain backwards compatibility,\n\t\t\t# check to see if a subclass implements _dispatch and dispatch\n\t\t\t# using that method if present.\n\t\t\tresponse = self.server._marshaled_dispatch(data, getattr(self, '_dispatch', None))\n\t\texcept: # This should only happen if the module is buggy\n\t\t\t# internal error, report as HTTP server error\n\t\t\tself.send_response(500)\n\t\t\tself.end_headers()\n\t\telse:\n\t\t\t# got a valid XML RPC response\n\t\t\tself.send_response(200)\n\t\t\tself.send_header(\"Content-type\", \"text/xml\")\n\t\t\tself.send_header(\"Content-length\", str(len(response)))\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(response)\n\n\t\t\t# shut down the connection\n\t\t\tself.wfile.flush()\n\t\t\tself.connection.shutdown() # Modified here!", "def do_POST(self):\r\n\r\n # Check that the path is legal\r\n if not self.is_rpc_path_valid():\r\n self.report_404()\r\n return\r\n\r\n try:\r\n # Get arguments by reading body of request.\r\n # We read this in chunks to avoid straining\r\n # socket.read(); around the 10 or 15Mb mark, some platforms\r\n # begin to have problems (bug #792570).\r\n max_chunk_size = 10*1024*1024\r\n size_remaining = int(self.headers[\"content-length\"])\r\n L = []\r\n while size_remaining:\r\n chunk_size = min(size_remaining, max_chunk_size)\r\n L.append(self.rfile.read(chunk_size))\r\n size_remaining -= len(L[-1])\r\n data = ''.join(L)\r\n\r\n # In previous versions of SimpleXMLRPCServer, _dispatch\r\n # could be overridden in this class, instead of in\r\n # SimpleXMLRPCDispatcher. 
To maintain backwards compatibility,\r\n # check to see if a subclass implements _dispatch and dispatch\r\n # using that method if present.\r\n response = self.server._marshaled_dispatch(\r\n data, getattr(self, '_dispatch', None)\r\n )\r\n except: # This should only happen if the module is buggy\r\n # internal error, report as HTTP server error\r\n self.send_response(500)\r\n self.end_headers()\r\n else:\r\n # got a valid XML RPC response\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/xml\")\r\n self.send_header(\"Content-length\", str(len(response)))\r\n self.end_headers()\r\n self.wfile.write(response)\r\n\r\n # shut down the connection\r\n self.wfile.flush()\r\n self.connection.shutdown(1)", "def ping(self, method=\"GET\", data=None, headers=None):\n return self._xjtrans(self.uri_prefix + \"/ping\", method, data, True, custom_headers=headers)", "def test_xmlrpc(self):\n try:\n self._run_test(\"pelix.remote.xml_rpc\",\n pelix.remote.FACTORY_TRANSPORT_XMLRPC_EXPORTER,\n pelix.remote.FACTORY_TRANSPORT_XMLRPC_IMPORTER,\n False)\n except queue.Empty:\n # Process error\n self.fail(\"Remote framework took to long to reply\")", "def execute_rpc(self, dummy=None, command=None, **kvargs):\n if command is None:\n raise TobyException(\"Mandatory argument 'command' is missing!\", host_obj=self)\n\n self.log(level='INFO', message='Executing rpc :')\n rpc_cmd_log = command\n try:\n rpc_cmd_log = etree.tostring(command)\n t.log(level='INFO', message=str(rpc_cmd_log))\n except Exception:\n pass\n\n prev_timeout = self.channels['pyez'].timeout\n if 'timeout' in kvargs:\n timeout = kvargs.pop('timeout')\n self.channels['pyez'].timeout = timeout\n\n if 'ignore_rpc_error' in kvargs and kvargs['ignore_rpc_error']:\n try:\n result = self.channels['pyez'].execute(command, **kvargs)\n except RpcError as ex:\n #result = ex.xml\n error_format = kvargs.get('error_format', '')\n if error_format == 'list':\n result = ex.errs\n else:\n result = ex.rsp\n else:\n result = self.channels['pyez'].execute(command, **kvargs)\n\n if 'timeout' in kvargs:\n self.channels['pyez'].timeout = prev_timeout\n\n self.log(level='INFO', message='rpc reply is :')\n if isinstance(result, etree._Element):\n xml = xml_dom.parseString(etree.tostring(result))\n pretty_xml = xml.toprettyxml()\n self.log(level='INFO', message=pretty_xml)\n else:\n self.log(level='INFO', message=result)\n return_value = Response(response=result, status=True)\n return return_value", "def _call(self, method, params=None, request_id=None):\n params = params or []\n\n # Determines which 'id' value to use and increment the counter associated with the current\n # client instance if applicable.\n rid = request_id or self._id_counter\n if request_id is None:\n self._id_counter += 1\n\n # Prepares the payload and the headers that will be used to forge the request.\n payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid}\n headers = {'Content-Type': 'application/json'}\n scheme = 'https' if self.tls else 'http'\n url = '{}://{}:{}'.format(scheme, self.host, self.port)\n\n # Calls the JSON-RPC endpoint!\n try:\n response = self.session.post(url, headers=headers, data=json.dumps(payload))\n response.raise_for_status()\n except HTTPError:\n raise TransportError(\n 'Got unsuccessful response from server (status code: {})'.format(\n response.status_code),\n response=response)\n\n # Ensures the response body can be deserialized to JSON.\n try:\n response_data = response.json()\n except ValueError as e:\n raise ProtocolError(\n 
'Unable to deserialize response body: {}'.format(e), response=response)\n\n # Properly handles potential errors.\n if response_data.get('error'):\n code = response_data['error'].get('code', '')\n message = response_data['error'].get('message', '')\n raise ProtocolError(\n 'Error[{}] {}'.format(code, message), response=response, data=response_data)\n elif 'result' not in response_data:\n raise ProtocolError(\n 'Response is empty (result field is missing)', response=response,\n data=response_data)\n\n return response_data['result']", "def ParseRPCReq(self, msgid: RpcMsgType, q: Queue):\n self.logger.info(f'Got RPC msg id: {msgid}')\n\n if msgid == RpcMsgType.TYPE_NODE_INFO:\n rsp = self.GetBasicNodeStats()\n q.put(rsp)\n elif msgid == RpcMsgType.TYPE_SCHEDULER_INFO:\n rsp = self.failed_schedule_count\n q.put(rsp)\n elif msgid == RpcMsgType.TYPE_POD_INFO:\n rsp = self.GetPodStats()\n q.put(rsp)", "def do_POST(myself):\n try:\n # get arguments\n data = myself.rfile.read(int(myself.headers[\"content-length\"]))\n # In previous versions of SimpleXMLRPCServer, _dispatch\n # could be overridden in this class, instead of in\n # SimpleXMLRPCDispatcher. To maintain backwards compatibility,\n # check to see if a subclass implements _dispatch and dispatch\n # using that method if present.\n response = myself.server._marshaled_dispatch(data, getattr(myself, '_dispatch', None))\n except Exception as info: # This should only happen if the module is buggy\n print (\"ERROR do_POST: \", info)\n print (\"Traceback follows:\", traceback.print_exc())\n\n # internal error, report as HTTP server error\n myself.send_response(500)\n myself.end_headers()\n else:\n # got a valid XML RPC response\n myself.send_response(200)\n myself.send_header(\"Content-type\", \"text/xml\")\n myself.send_header(\"Content-length\", str(len(response)))\n myself.end_headers()\n myself.wfile.write(response)\n\n # shut down the connection\n myself.wfile.flush()\n myself.connection.shutdown() # Modified here!", "def pingTest(self):", "def my_xmlrpclib_loads(data):\n p, u = xmlrpclib.getparser()\n p.feed(data)\n p.close()\n return u.close(), u.getmethodname()", "def request(self: JsonRpcClient, request_object: Request) -> Response:\n formatted_request = request_to_json_rpc(request_object)\n response = requests.post(self.url, json=formatted_request)\n # TODO: error checking here - raise if the response from server was error?\n # OR just return a Response object with ResponseStatus.ERROR?\n return json_to_response(response.json())", "def do_request(self, method, params=None):\n request_json = {\n 'jsonrpc':'2.0',\n 'method': method,\n 'params': params or {},\n 'auth': self.auth,\n 'id': '1',\n }\n\n logger.debug('urllib2.Request({0}, {1})'.format(self.url,json.dumps(request_json)))\n req = urllib2.Request(self.url, json.dumps(request_json))\n req.get_method = lambda: 'POST'\n req.add_header('Content-Type', 'application/json-rpc')\n\n try:\n res = urllib2.urlopen(req)\n response_json = json.load(res)\n except ValueError:\n raise ZabbixAPIException(\"Unable to parse json: %\" % res)\n\n logger.debug(\"Response Body: %s\" % json.dumps(response_json, indent=4,\n separators=(',', ': ')))\n\n if 'error' in response_json:\n msg = \"Error {code}: {message}, {data} while sending {json}\".format(\n code=response_json['error']['code'],\n message=response_json['error']['message'],\n data=response_json['error']['data'],\n json=str(request_json)\n )\n raise ZabbixAPIException(msg, response_json['error']['code'])\n\n return response_json", "def rpc(self, 
rpc, args):\n\n reply = self._session._dbs.pilot_rpc(self.uid, self.uid, rpc, args)\n\n return reply", "def test_ping_get(self):\n pass", "def parse_request(self, request_body: str) -> RequestData:\n try:\n params, method = xmlrpc_client.loads(\n request_body, use_builtin_types=self.use_builtin_types\n )\n\n except ExpatError as exc:\n raise RPCParseError(f\"Error while parsing XML-RPC request: {exc}\") from exc\n\n except Exception as exc:\n raise RPCInvalidRequest(\"The request appear to be invalid.\") from exc\n\n else:\n if not method:\n raise RPCInvalidRequest(\n \"Missing methodName. Please provide the name of the procedure you want to call\"\n )\n return params, method", "def setupXMLRPC(self):\n self.xmlrpc_server = SimpleXMLRPCServer((\"\", 8002), requestHandler=RequestHandler)\n \n self.xmlrpc_server.register_function(self.register_qarsp, 'register')\n self.xmlrpc_server.register_function(self.missionComplete_qarsp, 'missionComplete')\n self.xmlrpc_server.register_function(self.updateMap_qarsp, 'updateMap')\n \n self.xmlrpc_thread = threading.Thread(target=self.xmlrpc_server.serve_forever)\n self.xmlrpc_thread.start()", "def exec(self, method, args, is_batch=False):\n what = \"%s[%d]\" % (method, len(args) if is_batch else 1)\n body = self.rpc_body(method, args, is_batch)\n body_data = json.dumps(body, ensure_ascii=False).encode('utf8')\n\n tries = 0\n while tries < 100:\n tries += 1\n secs = -1\n info = None\n try:\n start = perf()\n response = self.request(body=body_data)\n secs = perf() - start\n\n info = {'jussi-id': response.headers.get('x-jussi-request-id'),\n 'secs': round(secs, 3),\n 'try': tries}\n\n # strict validation/asserts, error check\n payload = validated_json_payload(response)\n result = validated_result(payload, body)\n\n if secs > 5:\n log.warning('%s took %.1fs %s', what, secs, info)\n\n return result\n\n except (AssertionError, RPCErrorFatal) as e:\n raise e\n\n except (Exception, socket.timeout) as e:\n if secs < 0: # request failed\n secs = perf() - start\n info = {'secs': round(secs, 3), 'try': tries}\n log.warning('%s failed in %.1fs. try %d. %s - %s',\n what, secs, tries, info, repr(e))\n\n if tries % 2 == 0:\n self.next_node()\n sleep(tries / 5)\n\n raise Exception(\"abort %s after %d tries\" % (method, tries))", "def call(self, request):\n return self.wait(self.send(request))", "def encode_ping():\n return encode_command(CommandType.Ping)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clones a proposal. Only the fields that are supposed to be input by the employee are copied. The name of the clone will indicate that it is indeed a clone.
def __copy__(self): copy = super().__copy__() # Zeroes the fields that are invisible for employee. copy.owner = None copy.short_name = None copy.teaching_unit = None copy.major = None copy.level = None copy.year = None # Resets the status back to default. copy.status = ProposalStatus.DRAFT copy.name = "Klon: " + copy.name if copy.name_en: copy.name_en = "Clone: " + copy.name_en return copy
[ "def clone(self, name='', datastore_id=-1):\n self.client.call(self.METHODS['clone'], self.id, name, datastore_id)", "def clone(self):\n clone_patch = Patch(self.program)\n clone_patch.edit_list = deepcopy(self.edit_list)\n clone_patch.test_result = None\n return clone_patch", "def clone(self, data):", "def _clone(context, obj, clone_id):\n return context.manage_clone(obj, clone_id)", "def pvc_clone_ui(\n self,\n project_name,\n pvc_name,\n cloned_pvc_access_mode=constants.ACCESS_MODE_RWO,\n cloned_pvc_name=None,\n ):\n clone_name = cloned_pvc_name or f\"{pvc_name}-clone\"\n self.navigate_persistentvolumeclaims_page()\n\n logger.info(f\"Search and select the project {project_name}\")\n self.do_click(self.pvc_loc[\"pvc_project_selector\"])\n self.do_send_keys(self.pvc_loc[\"search-project\"], text=project_name)\n\n self.wait_for_namespace_selection(project_name=project_name)\n\n logger.info(f\"Search for PVC {pvc_name}\")\n self.do_send_keys(self.pvc_loc[\"search_pvc\"], text=pvc_name)\n\n logger.info(f\"Go to PVC {pvc_name} page\")\n self.do_click(get_element_type(pvc_name))\n\n logger.info(\"Click on Actions\")\n self.do_click(self.pvc_loc[\"pvc_actions\"])\n\n logger.info(\"Click on Clone PVC from dropdown options\")\n self.do_click(self.pvc_loc[\"clone_pvc\"], enable_screenshot=True)\n\n logger.info(\"Clear the default name of clone PVC\")\n ocs_version = version.get_semantic_ocs_version_from_config()\n if (\n self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n self.do_clear(format_locator(self.pvc_loc[\"clone_name_input\"], clone_name))\n else:\n self.do_clear(self.pvc_loc[\"clone_name_input\"])\n\n logger.info(\"Enter the name of clone PVC\")\n if (\n self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n self.do_send_keys(\n format_locator(self.pvc_loc[\"clone_name_input\"], clone_name),\n text=clone_name,\n )\n else:\n self.do_send_keys(self.pvc_loc[\"clone_name_input\"], text=clone_name)\n\n if (\n not self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n logger.info(\"Select Access Mode of clone PVC\")\n self.do_click(self.pvc_loc[cloned_pvc_access_mode])\n\n logger.info(\"Click on Clone button\")\n self.do_click(generic_locators[\"confirm_action\"], enable_screenshot=True)", "def ex_clone_node(self, node, name=None, random_vnc_password=None):\r\n data = {}\r\n\r\n data['name'] = name\r\n data['random_vnc_password'] = random_vnc_password\r\n\r\n path = '/servers/%s/action/' % (node.id)\r\n response = self._perform_action(path=path, action='clone',\r\n method='POST', data=data).object\r\n node = self._to_node(data=response)\r\n return node", "def clone(self, new_sha1, new_author_date=None, new_status='committed', \n clone_files=True):\n\n if not new_author_date:\n new_author_date = datetime.datetime.now\n\n new_commit = Commit.create(\n sha1=new_sha1,\n status=new_status,\n author_date=new_author_date,\n author_name=self.author_name,\n author_email=self.author_email,\n branch=self.branch,\n message=self.message,\n project=self.project)\n\n if clone_files:\n # Copy the files to the new commit\n for cf in self.files:\n new_cf = CommitFile.create(\n commit=new_commit, \n file_path=cf.file_path,\n change_type=cf.change_type)\n\n # Add a log message\n log = CommitLog.create(\n commit=new_commit,\n message='Cloned from %s'%(self.sha1)\n ) \n self.status = 'outdated'\n self.save()\n return new_commit", "def clone_CloneQuestionnaire(request, id):\n return 
CloneQuestionnaire_update(request, id)", "def clone(self):\n return self.__clone(True)", "def clone_as_draft(self, user):\n\n if not self.job:\n return\n\n # try to generate a unique name for the job owner\n name = self.job.name\n while Job.objects.filter(user=user, name=name).exists():\n name = (self.job.name + '_' + uuid.uuid4().hex)[:255]\n\n # This will be true if the job has 255 Characters in it,\n # In this case, we cannot get a new name by adding something to it.\n # This can be altered later based on the requirement.\n if name == self.job.name:\n # cannot generate a new name, returning none\n return None\n\n # Once the name is set, creating the draft job with new name and owner and same description\n cloned = Job.objects.create(\n name=name,\n user=user,\n description=self.job.description,\n )\n\n # copying other parameters of the job\n clone_job_data(self.job, cloned)\n\n return cloned", "def analyze_clone(\n patient, clone_id,\n clones_mutations, estimates,\n delta_lb=0.0, delta_ub=4.0,\n **kwargs\n):\n mutations = clones_mutations[clones_mutations == clone_id].index\n clone = patient.get_mutations_subset(mutations)\n clone_estimates = estimates.loc[mutations]\n optimizer = Random(\n clone, clone_estimates,\n delta_lb=delta_lb, delta_ub=delta_ub\n )\n return Clone(clone_id, *optimizer.optimize(**kwargs))", "def clone(self):\n fields = dict((k, v.clone() if isinstance(v, FieldSet) else v)\n for k, v in self.fields.iteritems())\n return self.__class__(force_order=self.fields.keys(), **fields)", "def clone_pool(self, pool_name, cloned_for, avi_config, userprefix=None):\n pools = [pool for pool in avi_config['Pool'] if\n pool['name'] == pool_name]\n if pools:\n pool_obj = copy.deepcopy(pools[0])\n pname = pool_obj['name']\n pool_name = re.sub('[:]', '-', '%s-%s' % (pname, cloned_for))\n pool_obj['name'] = pool_name\n avi_config['Pool'].append(pool_obj)\n LOG.info(\n \"Same pool reference to other object. 
Clone Pool %s for %s\" %\n (pool_name, cloned_for))\n return pool_obj['name']\n return None", "def test_replicate_primitive(self):\n card = examples.Card(1, 'clubs')\n self.assertEqual(card.rank, 1)\n self.assertEqual(card.suit, 'clubs')\n card_copy = self.replicator.replicate(card)\n\n self.assertNotEqual(id(card), id(card_copy))\n self.assertEqual(card, card_copy)\n\n self.assertEqual(card.rank, card_copy.rank)\n self.assertEqual(card.suit, card_copy.suit)", "def Clone(self):\n return self.__class__(self.validator)", "def clone_course(self, source_course_id, dest_course_id, user_id, fields=None):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def clone(self, request, pk):\n notice = get_object_or_404(Notice, pk=pk)\n notice_clone = Notice()\n\n ignore_fields = ['guid', 'id', 'create_dt', 'update_dt',\n 'creator', 'creator_username',\n 'owner', 'owner_username']\n field_names = [field.name\n for field in notice.__class__._meta.fields\n if field.name not in ignore_fields]\n\n for name in field_names:\n setattr(notice_clone, name, getattr(notice, name))\n\n notice_clone.notice_name = 'Clone of %s' % notice_clone.notice_name\n notice_clone.creator = request.user\n notice_clone.creator_username = request.user.username\n notice_clone.owner = request.user\n notice_clone.owner_username = request.user.username\n notice_clone.save()\n\n return redirect(reverse(\n 'admin:memberships_notice_change',\n args=[notice_clone.pk],\n ))", "def clone(self, parent):\n # noinspection PyArgumentList\n return self.__class__(parent)", "def do_clone_description_bundle(self):\n\n target_id = self.param_dict.get(\"target_id\", str())\n bundle_name = self.param_dict.get(\"bundle_name\", str())\n\n result = dict(status=\"success\", message=\"\")\n\n if Description().get_description_handle().find(\n {\"name\": {'$regex': \"^\" + bundle_name + \"$\",\n \"$options\": 'i'}}).count() >= 1:\n result[\"status\"] = \"error\"\n result[\"message\"] = \"Bundle name must be unique\"\n\n self.context[\"result\"] = result\n return self.context\n\n # retrieve clone target\n description = Description().GET(target_id)\n\n # new bundle being created\n try:\n bundle = Description().create_description(profile_id=self.profile_id, component=self.component,\n name=bundle_name, stages=description.get('stages', list()),\n attributes=description.get('attributes', dict()),\n meta=description.get('meta', dict()))\n\n result[\"data\"] = dict(id=str(bundle[\"_id\"]), name=bundle[\"name\"])\n except Exception as e:\n message = \"Couldn't create bundle: \" + bundle_name + \" \" + str(e)\n result[\"status\"] = \"error\"\n result[\"message\"] = message\n\n self.context[\"result\"] = result\n return self.context" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We need the factory to dynamically load daemon config, to support HA failovers
def _get_daemon_factory():
    # Dealing with circular dependency
    try:
        from cloudify_agent.api.factory import DaemonFactory
    except ImportError:
        # Might not exist in e.g. the REST service
        DaemonFactory = None
    return DaemonFactory
[ "def load_cfg(cfg_path=CONFIG_FILEPATH):\n try:\n with open(cfg_path, 'r') as fo:\n loaded_config = json.load(fo)\n except (ValueError, IOError, OSError):\n pass\n else:\n try:\n global daemon_host\n daemon_host = loaded_config['cmd_address']\n global daemon_port\n daemon_port = loaded_config['cmd_port']\n return\n except KeyError:\n pass\n # If all go right i will not do this print\n print 'Impossible to load cfg, client_daemon already loaded?'\n print 'Loaded default configuration for socket'", "def load_config(self):\n # Open the file at default lcoation, unless something else\n # is passed in instead\n self.logger.info('Running load_config() for HerdClient')\n if self.config is not None:\n self.logger.debug(\"There's a config file passed in\")\n f = file(self.config)\n self.cfg = Config(f)\n \n # Allow parameters passed on the command line to override the\n # config file\n if self.seed is None:\n self.logger.debug(\"There's no seed passed in\")\n self.seed = self.cfg.management.seed", "def _prepare_daemon_conf(confdir, daemon_conf):\n conf = {\n 'authorization-plugins': ['authz'],\n 'bridge': 'none',\n 'cgroup-parent': 'docker',\n 'default-runtime': 'docker-runc',\n 'exec-opt': ['native.cgroupdriver=cgroupfs'],\n 'hosts': ['tcp://127.0.0.1:2375'],\n 'ip-forward': False,\n 'ip-masq': False,\n 'iptables': False,\n 'ipv6': False,\n 'runtimes': {\n 'docker-runc': {\n 'path': subproc.resolve('docker_runtime'),\n },\n },\n }\n conf.update(daemon_conf)\n\n with open(os.path.join(confdir, 'daemon.json'), 'w') as f:\n json.dump(conf, fp=f)", "def load_configuration(app):\n app.config.from_pyfile(\"../../../config/service_config.py\")", "def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):\n # very often the config section_name is based on the class name\n # the None singleton will be passed through to readconf as is\n if section_name == '':\n section_name = sub(r'([a-z])([A-Z])', r'\\1-\\2',\n klass.__name__).lower()\n try:\n conf = utils.readconf(conf_file, section_name,\n log_name=kwargs.get('log_name'))\n except (ValueError, IOError) as e:\n # The message will be printed to stderr\n # and results in an exit code of 1.\n sys.exit(e)\n\n # patch eventlet/logging early\n utils.monkey_patch()\n eventlet.hubs.use_hub(utils.get_hub())\n\n # once on command line (i.e. daemonize=false) will over-ride config\n once = once or not utils.config_true_value(conf.get('daemonize', 'true'))\n\n # pre-configure logger\n if 'logger' in kwargs:\n logger = kwargs.pop('logger')\n else:\n logger = utils.get_logger(conf, conf.get('log_name', section_name),\n log_to_console=kwargs.pop('verbose', False),\n log_route=section_name)\n\n # optional nice/ionice priority scheduling\n utils.modify_priority(conf, logger)\n\n # disable fallocate if desired\n if utils.config_true_value(conf.get('disable_fallocate', 'no')):\n utils.disable_fallocate()\n # set utils.FALLOCATE_RESERVE if desired\n utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\\n utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))\n\n # By default, disable eventlet printing stacktraces\n eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))\n eventlet.debug.hub_exceptions(eventlet_debug)\n\n # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on\n # some platforms. 
This locks in reported times to UTC.\n os.environ['TZ'] = 'UTC+0'\n time.tzset()\n\n logger.notice('Starting %s', os.getpid())\n try:\n d = klass(conf)\n DaemonStrategy(d, logger).run(once=once, **kwargs)\n except KeyboardInterrupt:\n logger.info('User quit')\n logger.notice('Exited %s', os.getpid())\n return d", "def loadConfig(self):\n pass", "def parse_factory_yaml(self,config_yaml):\n return config_yaml", "def getRemoteConfigServiceProxy(self):", "def start_conf():\n from oslo_config import cfg\n\n cfg.CONF(\n args=[],\n default_config_files=['service/etc/oslo_conf.ini']\n )", "def load_services_config():\n\n # The service tells what keys and types it requires for configuration\n # and the corresponding data has to be set in .env\n\n for service in [\n ProfileService,\n SMSNotificationService,\n ]:\n template = service.get_config_template()\n service.config = get_config_from_env(template)", "def create(self, io_handler, factory_pid, **kwargs):\n config = self._config_admin.create_factory_configuration(factory_pid)\n\n # Print the configuration PID\n pid = config.get_pid()\n io_handler.write_line(\"New configuration: {0}\", pid)\n\n if kwargs:\n # Update it immediately if some properties are already set\n config.update(kwargs)", "def create_config():\n config_parser = configparser.ConfigParser()\n config_parser.read('../../pythief.ini')\n return config_parser", "def init_configuration(self):\n\n self.app.config.from_envvar('SETTINGS')", "def init():\n db = get_db()\n cur = db.execute('select cache_info,persistent_info from serverconfig')\n ret = cur.fetchone()\n\n if ret == None:\n \tcache_info =''\n persistent_info =''\n else:\n \tcache_info = ret[0]\n persistent_info = ret[1]\n\n #updates the configurations to create instances\n configurations.set_cache_info(cache_info)\n configurations.set_persistent_info(persistent_info)\n\n return configurations", "def configure():\n\n with settings(warn_only=True):\n # disable default site\n sudo('rm /etc/nginx/sites-enabled/default')\n\n # upload nginx server blocks\n put(env.config_dir + '/nginx.conf', '/tmp/nginx.conf')\n sudo('mv /tmp/nginx.conf %s/nginx_pmgbilltracker.conf' % env.project_dir)\n\n # link server blocks to Nginx config\n with settings(warn_only=True):\n sudo('ln -s %s/nginx_pmgbilltracker.conf /etc/nginx/conf.d/' % env.project_dir)\n\n # upload supervisor config\n put(env.config_dir + '/supervisor.conf', '/tmp/supervisor.conf')\n sudo('mv /tmp/supervisor.conf /etc/supervisor/conf.d/supervisor_pmgbilltracker.conf')\n sudo('supervisorctl reread')\n sudo('supervisorctl update')\n\n # configure Flask\n with settings(warn_only=True):\n sudo('mkdir %s/instance' % env.project_dir)\n put(env.config_dir + '/config_backend.py', '/tmp/config_backend.py')\n put(env.config_dir + '/config_frontend.py', '/tmp/config_frontend.py')\n put(env.config_dir + '/config_backend_private.py', '/tmp/config_backend_private.py')\n put(env.config_dir + '/config_frontend_private.py', '/tmp/config_frontend_private.py')\n sudo('mv /tmp/config_backend.py ' + env.project_dir + '/instance/config_backend.py')\n sudo('mv /tmp/config_frontend.py ' + env.project_dir + '/instance/config_frontend.py')\n sudo('mv /tmp/config_backend_private.py ' + env.project_dir + '/instance/config_backend_private.py')\n sudo('mv /tmp/config_frontend_private.py ' + env.project_dir + '/instance/config_frontend_private.py')\n\n restart()\n return", "def _configure_services(self):\n if self.series == 'trusty':\n keystone_config = {'admin-password': 'openstack',\n 'admin-token': 
'ubuntutesting',\n 'openstack-origin': 'cloud:trusty-mitaka'}\n designate_config = {'openstack-origin': 'cloud:trusty-mitaka',\n 'nameservers': 'ns1.mojotest.com.'}\n else:\n keystone_config = {'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting'}\n designate_config = {'nameservers': 'ns1.mojotest.com.'}\n\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n\n configs = {\n 'keystone': keystone_config,\n 'designate': designate_config,\n 'percona-cluster': pxc_config,\n }\n\n super(DesignateBindDeployment, self)._configure_services(configs)", "def load_config():\n return config.load_config({})", "def create_config(cls):\n return cls._config_registry", "def load(self):\n if not self.varname in os.environ:\n print(\"Error: Environment variable '{}' not found. Standard options are: \\n{}\".format(self.varname,self.get_standard_configurations()))\n sys.exit(1)\n config_name = os.environ[self.varname]\n print(\"Using configuration environment var {}={}\".format(self.varname,config_name))\n module = self.config_file_finder(config_name)\n try:\n return module.FEMB_CONFIG\n except AttributeError:\n module = self.config_file_finder_ini(config_name)\n try:\n print(\"Found INI file for {}\".format(module['DEFAULT']['NAME']))\n return module\n except:\n print(\"Error: Config module '{}' doesn't contain the class FEMB_CONFIG, no INI file either\".format(config_name))\n sys.exit(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schedule a channel method to be called from the connection thread. Use this to schedule a channel method such as .publish or .basic_ack to be called from the connection thread.
def channel_method(self, method, channel=None, wait=True, timeout=None, **kwargs):
    if wait and self._consumer_thread \
            and self._consumer_thread is threading.current_thread():
        # when sending from the connection thread, we can't wait because
        # then we wouldn't allow the actual send loop (._process_publish)
        # to run, because we'd block on the err_queue here
        raise RuntimeError(
            'Cannot wait when sending from the connection thread')
    # the message is going to be sent from another thread (the .consume
    # thread). If an error happens there, we must have a way to get it
    # back out, so we pass a Queue together with the message, that will
    # contain either an exception instance, or None
    err_queue = queue.Queue() if wait else None
    envelope = {
        'method': method,
        'message': kwargs,
        'err_queue': err_queue,
        'channel': channel
    }
    self._connection_tasks_queue.put(envelope)
    if err_queue:
        err = err_queue.get(timeout=timeout)
        if isinstance(err, Exception):
            raise err
[ "def on_channel_open(self, channel):\n logger.info('Channel opened..')\n\n self._channel = channel\n self._channel.basic_qos(prefetch_count=10)\n self.add_on_channel_close_callback()\n if self._exchange:\n self.setup_exchange(self._exchange, True)", "def publish(self, channel: str, message: str) -> None:\n\n if self.channels.get(channel, None):\n self.channels[channel].put_nowait(message)\n else:\n raise Exception(f'Channel {channel} does not exists!')", "def call_from_thread(method, *args):\n reactor.callFromThread(method, *args)", "def _connect(self):\n logger.debug(f\"Attempt to connect to RabbitMQ: connection_params={self.connection_parameters}\")\n self.connection = pika.BlockingConnection(self.connection_parameters)\n logger.info(f\"Connection Created\")\n self.channel = self.connection.channel()\n logger.info(\"Channel Created\")\n self.channel.confirm_delivery() # ensure persistence prior to message confirmation\n self.channel.basic_qos(prefetch_count=100) # only 1 un-Acked message delivered at a time TODO change back to 1.\n logger.info(\"Channel configured\")", "def run(self):\n LOGGER.info(\"Consumer %s is running...\" % self.consumer_name)\n assert self._connection is not None, \"Connection is None\"\n assert self._connection.is_open, \"Connection is closed\"\n self.open_channel()", "def publish(self, cls, refresh_method='', *args, **kwargs): # pylint: disable=keyword-arg-before-vararg\r\n pass", "def subscribe(channel: str, callback: Callable[..., Any]) -> None:\n _get().subscribe(channel, callback)", "def publish(self, msg, exchange):\n\n try:\n self._publish(msg, exchange)\n except pika.exceptions.ConnectionClosed:\n logging.info('Reconnecting to queue')\n self.connect(exchange)\n self._publish(msg, exchange)", "def update_channel(self, channel):", "def _patch(self):\n\n self.logger.debug(\"Injecting Connectable.connect hook...\")\n original = discord.abc.Connectable.connect\n\n async def connect(obj, *, timeout=60):\n vc = await original(obj, timeout=timeout)\n await self.registerChannelHandle(vc)\n\n self.logger.debug(\"Injecting DiscordVoiceWebSocket.receive_message hook...\")\n async def received_message(msg):\n\n await self.handle_ws_event(msg, vc)\n return await discord.gateway.DiscordVoiceWebSocket.received_message(vc.ws, msg)\n\n vc.ws.received_message = received_message\n\n self.logger.debug(\"...Success!\")\n return vc\n \n discord.abc.Connectable.connect = connect\n self.logger.debug(\"...Success!\")", "def publish(self, package):\n try:\n self.connect()\n self._publish(package)\n except (pika.exceptions.IncompatibleProtocolError, pika.exceptions.ConnectionClosed):\n LOGGER.info('reconnecting to queue in 5 sec...')\n time.sleep(5)\n self.publish(package)", "def _on_channel_opened(self, channel):\n\n # Create a channel to use\n self._channel = channel\n\n # Log the open channel\n self._logger.info('RabbitMQ channel opened at %s:%i',\n self.params.host, self.params.port)", "def at_channel_create(self):\r\n pass", "def __on_channel_open(self, channel):\n LOGGER.info('Channel opened, adding channel close callback')\n channel.add_on_close_callback(self.__on_channel_closed)\n LOGGER.info('Declaring exchange %s', self.exchange)\n channel.exchange_declare(self.__on_exchange_declareok,\n self.exchange,\n self.exchange_type)\n self._channel = channel", "def channelJoined(self, channel):", "def schedule(self) -> None:\n if self.should_schedule is False:\n return\n\n with self.schedule_lock:\n self._schedule()", "def at_channel_create(self):\n pass", "def subscribe(self,\n 
method=None,\n topic=None):\n topic = self._transcribe_topic(topic)\n pub.subscribe(method, topic)", "async def channel(self, name='default'):\n future = asyncio.Future(loop=self.loop)\n\n if not self._connecting.done(): # pragma: no cover\n self.log.debug('Await connecting...')\n await self._connecting\n\n if name in self._channels_opening:\n if not self._channels_opening[name].done():\n self.log.debug('Channel already opening, wait it...')\n return await self._channels_opening[name]\n\n if name in self._channels and self._channels[name].is_open:\n future.set_result(self._channels[name])\n return await future\n\n self._channels_opening[name] = self._create_future()\n\n def on_channel(channel: pika.channel.Channel):\n \"\"\"On channel closed handler.\n \"\"\"\n channel.add_on_close_callback(self.on_channel_closed)\n channel.basic_qos(prefetch_count=100)\n self._channels[name] = channel\n try:\n self._channels_opening[name].set_result(channel)\n except asyncio.InvalidStateError: # pragma: no cover\n pass\n future.set_result(channel)\n\n self.connection.channel(on_open_callback=on_channel)\n return await asyncio.wait_for(future, timeout=DECLARE_CHANNEL_TIMEOUT)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make the queue name for this handler based on the correlation id
def _queue_name(self, correlation_id):
    return '{0}_response_{1}'.format(self.exchange, correlation_id)
[ "def name(self):\n return self._queue.name", "def get_queue_name():\r\n return getattr(settings, 'SEARCH_QUEUE_NAME', 'haystack_search_queue')", "def get_panda_queue_name(self, panda_resource):\n try:\n panda_queue = self.get(panda_resource).get('nickname')\n return panda_queue\n except Exception:\n return None", "def sqs_name():\n if is_local_env():\n return LOCAL_QUEUE_NAME\n\n # get data from parameter store with correct key\n # sqs_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"sqs_name\"", "def queue_id(self, queue_id):\n\n self._queue_id = queue_id", "def mqrt_MQHandleToFormatName(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hQueue\", \"lpwcsFormatName\", \"lpdwCount\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_name(self):\n return '{event_name} Event Handler'.format(\n event_name=SecurityLoanGeneral.get_event_name()\n )", "def _generateName(self):\n return \"PoolThread-{}-{}\".format(self.name or id(self), self.workers)", "def handle_create(self):\r\n queue_name = self.physical_resource_name()\r\n queue = self.marconi().queue(queue_name, auto_create=False)\r\n # Marconi client doesn't report an error if an queue with the same\r\n # id/name already exists, which can cause issue with stack update.\r\n if queue.exists():\r\n raise exception.Error(_('Message queue %s already exists.')\r\n % queue_name)\r\n queue.ensure_exists()\r\n self.resource_id_set(queue_name)\r\n return queue", "def get_name(mosaic_id):\n return f'{Mosaic.name_prefix}{Mosaic.get_id_str(mosaic_id)}'", "def add_queue_info(record):\n record.queue_id = background_helper.lookup_queue_for_action(record.action)\n return record", "def log_request_id_in_threadname(event):\n current_thread = threading.current_thread()\n original_name = current_thread.name\n\n # Hack the thread's name to inject a UUID\n registry = event.request.registry\n colored_logs = asbool(registry.settings.get(get_key('color'), False))\n\n request_id = event.request.id\n\n if colored_logs:\n request_id = colorize_text(request_id)\n\n current_thread.name = \"%s][request=%s\" % (\n original_name,\n request_id,\n )\n\n def unhack_thread_name(request):\n # Restore the thread's original name\n current_thread.name = original_name\n\n event.request.add_finished_callback(unhack_thread_name)", "def test_create_queue_passing_integer_as_name():\n cf = boto3.client(\"cloudformation\", region_name=\"us-east-1\")\n client = boto3.client(\"sqs\", region_name=\"us-east-1\")\n\n stack_name = str(uuid4())[0:6]\n q_name = f\"{randint(10000000, 99999999)}\"\n template_body = simple_queue.substitute(q_name=q_name)\n cf.create_stack(StackName=stack_name, TemplateBody=template_body)\n\n queue_urls = client.list_queues(QueueNamePrefix=q_name[0:6])[\"QueueUrls\"]\n queue_urls.should.have.length_of(1)", "def _generate_suffix(record, client, **kwargs):\n recid = record.pid.pid_value\n return f\"{client.name}.{recid}\"", "def get_key_name(cls, name: str) -> str:\n return f\"redis-lock:{name}\"", "def unique_id(self) -> str:\n return f\"{self._controller.controller_id}-delay\"", "def cq_commerce_asset_handler_name(self) -> ConfigNodePropertyString:\n return self._cq_commerce_asset_handler_name", "def generate_unique_container_name(self, identifier=None):\n parts = []\n parts.append(self.config.base_container_name)\n if identifier:\n parts.append(identifier)\n parts.append(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M'))\n parts.append(str(uuid.uuid4()).replace('-', ''))\n return '_'.join(parts)", "def 
get_queue(self, task_name):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test checks the situation where we're currently present in a child directory of the project root. The paths in the default map should be configured to be relative to the project root, and NOT from the current working directory.
def _(project_root: Path = fake_project_pyproject):
    fake_context = types.SimpleNamespace(
        params={"path": (str(project_root),)},
        default_map={},
    )
    with mock.patch.object(Path, "cwd", return_value=project_root / "a" / "d"):
        assert set_defaults_from_config(fake_context, None, None) == fake_context.default_map  # type: ignore[arg-type]
        assert fake_context.default_map == {
            "exclude": (str(project_root / "a" / "b"),),
            "path": (str(project_root / "a"), str(project_root / "x" / "y")),
            "order": "hello world",
        }
        assert fake_context.params["config_path"] == project_root / "pyproject.toml"
[ "def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.path.normpath(entry)\n \n if entry_normalised == os.path.normpath(settings.TEMPLATE_DIR):\n found_path = True\n \n self.assertTrue(found_path, f\"{FAILURE_HEADER}Your project's templates directory is not listed in the TEMPLATES>DIRS lookup list. Check your settings.py module.{FAILURE_FOOTER}\")", "def test_root_absolute(self):\n new = CrawlConfig.CrawlConfig({'root': '.'})\n self.expected(os.getcwd(), new.get('DEFAULT', 'root'))\n new.add_section('crawler')\n new.set('crawler', 'logpath', '%(root)s/xyzzy.log')\n self.expected(\"%s/xyzzy.log\" % os.getcwd(),\n new.get('crawler', 'logpath'))", "def test_discover_conf_py_directory_search_parents():\n with tempfile.TemporaryDirectory() as tempdir:\n root_dir = pathlib.Path(tempdir)\n _install_conf_py(root_dir)\n os.makedirs(str(root_dir / \"a\" / \"b\"))\n expected = pathlib.Path(tempdir).resolve()\n assert discover_conf_py_directory(root_dir / \"a\" / \"b\") == str(\n expected\n )", "def test_get_true_dir_no_symlinks(project_dir: Path):\n # GIVEN a directory with some files but no symlinked files\n a_file: Path = Path(project_dir, \"hello.txt\")\n a_file.touch()\n assert a_file.exists()\n\n # WHEN fetching the true dir for the files in fixture dir\n true_dir = helpers.get_true_dir(a_file.parent)\n\n # THEN assert that the true_dir is None since there where no symbolic links in the project_dir\n assert true_dir is None", "def testCheckDirectory(self):\n with self.assertRaises(ValueError):\n check_parent(\"\")\n with self.assertRaises(ValueError):\n check_parent(None)\n check_parent(\"sample/not_here.tif\")\n check_parent(\"output/create_me/create_me_as_well/\")\n check_parent(\"output/create_me_too/file_here.tif\")\n self.assertTrue(os.path.exists(\"output/create_me/\"))\n self.assertTrue(os.path.exists(\"output/create_me/create_me_as_well\"))\n self.assertTrue(os.path.exists(\"output/create_me_too/\"))", "def test_check_dir_existence_root_is_wrong(self):\n self.assertFalse(check_dir_existence('/some/wrong/path', self.existing_dirs))", "def test_entangled_url_mapping(self):\n self.env.append_path(self.path('a'), '/a')\n # Map a subdir to something else\n self.env.url_mapping[self.path('a/sub')] = '/s'\n self.create_files({'a/sub/foo': '42'})\n # The most inner url mapping, path-wise, takes precedence\n assert self.mkbundle('sub/foo').urls() == ['/s/foo']", "def test_search_parents_found():\n with tempfile.TemporaryDirectory() as tempdir:\n root_dir = pathlib.Path(tempdir)\n os.makedirs(str(root_dir / \"a\" / \"b\"))\n _install_conf_py(root_dir)\n assert _search_parents(root_dir / \"a\" / \"b\") == root_dir", "def test_check_dir_existence_sub_dir_not_found(self):\n self.assertFalse(self.existing_dirs.append('unexpected_dir'))", "def test_relativepath():\n folder = om.Folder(persist)\n for child in folder:\n om_relpath = child.relativepath\n parent_path = os.path.join(folder.path, om.constant.Meta)\n manual_relpath = os.path.relpath(child.path, parent_path)\n\n assert_equals(om_relpath, manual_relpath)\n\n # Manually adding a child\n channel = om.Channel(os.path.join(persist, r'.meta\\chan.txt'), folder)\n assert_equals(channel.relativepath, 
os.path.relpath(channel.path, os.path.join(folder.path, om.constant.Meta)))\n \n channel = om.Channel(os.path.join(dynamic, r'.meta\\chan.txt'), folder)\n assert_true(os.path.isabs(channel.relativepath))", "def do_test_executor_classpath_relativize(self, executor):\n here = os.path.abspath(\".\")\n runner = executor.runner([here], \"bogus\")\n self.assertFalse(here in runner.cmd)\n parts = runner.cmd.split(\" \")\n found = False\n for i, part in enumerate(parts):\n if part == \"-cp\":\n self.assertTrue(os.path.abspath(parts[i + 1]) == here)\n found = True\n self.assertTrue(found)", "def test_default_route_relative_path(self):\n path = '.'\n def_route = DefaultRoute(path)\n #assert_regexp_matches(def_route.default_handler_args['path'], '.')\n assert path in def_route.default_handler_args['path']", "def test_windows_paths(self):\n\n current_path = os.path\n import ntpath\n\n os.path = ntpath\n try:\n\n class NoCompileTemplate(Template):\n def _compile_from_file(self, path, filename):\n self.path = path\n return Template(\"foo bar\").module\n\n t1 = NoCompileTemplate(\n filename=\"c:\\\\foo\\\\template.html\",\n module_directory=\"c:\\\\modules\\\\\",\n )\n\n eq_(t1.uri, \"/foo/template.html\")\n eq_(t1.path, \"c:\\\\modules\\\\foo\\\\template.html.py\")\n\n t1 = NoCompileTemplate(\n filename=\"c:\\\\path\\\\to\\\\templates\\\\template.html\",\n uri=\"/bar/template.html\",\n module_directory=\"c:\\\\modules\\\\\",\n )\n\n eq_(t1.uri, \"/bar/template.html\")\n eq_(t1.path, \"c:\\\\modules\\\\bar\\\\template.html.py\")\n\n finally:\n os.path = current_path", "def test_default_output_dir_exists():\n\n assert os.path.exists(\"corems_output\")", "def test_default_config_path(user_config_path):\n assert config.USER_CONFIG_PATH == user_config_path", "def test_safe_copy_makedir_exists(self):\n pass", "def test_get_parent_dir(self):\n self.assertEquals(util.fileops.get_parent_dir('/home/brandon/test/test.py'),\n '/home/brandon/test')\n self.assertEquals(util.fileops.get_parent_dir('test.html'), '')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The getSshTriggerCounters task connects to the Node parameter and executes all SshCounter counters associated with the Trigger parameter
def getSshTriggerCounters(node,trigger):
    logger.debug('SSH Getting ' + trigger.name + ' SshCounter counters from ' + node.name)
    output=[]
    #Checking if the trigger has got SshCounter
    counters = trigger.counters.all().select_subclasses()
    hascounters=False
    for counter in counters:
        if isinstance(counter, SshCounter):
            hascounters=True
    if hascounters == False:
        return 'SSH Trigger ' + trigger.name + ' does not have SshCounter counters'
    logger.debug('SSH Connecting to ' + node.sshprofile.user + '@' + node.hostname)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        mykey = paramiko.RSAKey.from_private_key_file(node.sshprofile.keyfile)
        ssh.connect(node.hostname, username=node.sshprofile.user, pkey = mykey)
    except Exception, e:
        #Exit if we can not connect to the node via SSH
        error = 'SSH Error connecting to ' + node.hostname
        logger.error(error)
        logger.error(str(e))
        return error
    logger.debug('SSH Connected to ' + node.hostname)
    # Loop each trigger counter and get value from node
    for counter in counters:
        if isinstance(counter, SshCounter):
            logger.debug('SSH executing ' + counter.script)
            try:
                #channel = ssh.get_transport().open_session()
                stdin, stdout, stderr = ssh.exec_command(counter.script)
                value=''
                if stdout.channel.recv_exit_status() != 0:
                    raise Exception("Error executing "+ counter.script)
                for line in stdout:
                    value = value + line.strip('\n')
                longkey = 'SSH ' + node.name + ' ' + counter.name + ' ' + datetime.datetime.now().strftime('%Y%m%d%H%M')
            except Exception, e:
                error = 'SSH Error getting executing ' + counter.script + ' from Trigger "' + trigger.name + '" on ' + node.name + '. Exit status = ' + str(stdout.channel.recv_exit_status())
                logger.error(error)
                logger.error(str(e))
                ssh.close()
                return error
            key = 'ssh_sshcounter.' + str(node.pk) + '.' + str(counter.pk)
            # Update threshold counter in memached
            thresholdCounter = cache.get(key)
            if thresholdCounter == None:
                thresholdCounter = 0
            thresholdCounter = int(thresholdCounter)
            if counter.comparison == ">":
                if float(value) > counter.threshold:
                    thresholdCounter = thresholdCounter + 1
                else:
                    thresholdCounter = 0
            if counter.comparison == "<":
                if float(value) < counter.threshold:
                    thresholdCounter = thresholdCounter + 1
                else:
                    thresholdCounter = 0
            if counter.comparison == "=":
                if float(value) == counter.threshold:
                    thresholdCounter = thresholdCounter + 1
                else:
                    thresholdCounter = 0
            cache.set(key,thresholdCounter,86400)
            key = key + '.' + datetime.datetime.now().strftime('%Y%m%d%H%M')
            #Send value to cache backend
            logger.debug('SSH value: ' + node.name + '.'+ counter.name + ':' + value)
            logger.debug('SSH cache entry: ' + key + ':' + value)
            cache.set(key,value,86400)
            output.append([node.name + '.' + counter.name,value])
    ssh.close()
    return output
[ "def _cx_counters_psutil(self):\n for iface, counters in psutil.net_io_counters(pernic=True).iteritems():\n metrics = {\n 'bytes_rcvd': counters.bytes_recv,\n 'bytes_sent': counters.bytes_sent,\n 'packets_in.count': counters.packets_recv,\n 'packets_in.error': counters.errin,\n 'packets_out.count': counters.packets_sent,\n 'packets_out.error': counters.errout,\n }\n self._submit_devicemetrics(iface, metrics)", "def counters(cli_opts, json): # noqa: B902\n\n return_code = fib.FibCountersCmd(cli_opts).run(json)\n sys.exit(return_code)", "def ssh_machine_proc_stats(hostname, filepath): \n # Execute ssh command, get results\n cat_output = subprocess.check_output(['ssh', hostname, 'cat', filepath]) \n return cat_output", "def counters ( self ) :\n return self._counters", "def getIbvCountersWrapper(args):\n return getIbvCounters(*args)", "def _cx_state_psutil(self):\n metrics = defaultdict(int)\n for conn in psutil.net_connections():\n protocol = self._parse_protocol_psutil(conn)\n status = self.tcp_states['psutil'].get(conn.status)\n metric = self.cx_state_gauge.get((protocol, status))\n if metric is None:\n self.log.warning('Metric not found for: %s,%s', protocol, status)\n else:\n metrics[metric] += 1\n\n for metric, value in metrics.iteritems():\n self.gauge(metric, value)", "def executeShellStats(self):\n return subprocess.check_output([self.SHELL_PATH + '/stats.sh',\n self.TOKEN,\n self.GITLAB_GROUP,\n self.PLAIN_PROJECT,\n self.ROOT_PATH])", "def list_counters():\n print(\"\\n\".join(query_column(\"SELECT counter FROM counters\")))", "def _collect_service_restart_stats():\n try:\n service_dict = ServiceStateWrapper().get_all_services_status()\n except Exception as e:\n logging.error(\"Could not fetch service status: %s\", e)\n return\n for service_name, status in service_dict.items():\n SERVICE_RESTART_STATUS.labels(\n service_name=service_name,\n status=\"Failure\",\n ).set(status.num_fail_exits)\n SERVICE_RESTART_STATUS.labels(\n service_name=service_name,\n status=\"Success\",\n ).set(status.num_clean_exits)", "def trigger_checkCOUNT(self):\n self.open.write('TRIGGER:COUNT?')\n reply = self.open.read() \n return('Trigger Count: ' + str(reply))", "def _init_counts(self, execution_id=None, base_execution_id=None):\n # all counts are divided by execution_id, which provides a context for the scheduler's execution, so that\n # it can be reused in multiple contexts\n\n # stores total the number of occurrences of a node through the time scale\n # i.e. 
the number of times node has ran/been queued to run in a trial\n if execution_id not in self.counts_total:\n self.counts_total[execution_id] = {}\n\n if base_execution_id is not None:\n if base_execution_id not in self.counts_total:\n raise SchedulerError('execution_id {0} not in {1}.counts_total'.format(base_execution_id, self))\n\n self.counts_total[execution_id] = {\n ts: {n: self.counts_total[base_execution_id][ts][n] for n in self.nodes} for ts in TimeScale\n }\n else:\n self.counts_total[execution_id] = {\n ts: {n: 0 for n in self.nodes} for ts in TimeScale\n }\n\n # counts_useable is a dictionary intended to store the number of available \"instances\" of a certain node that\n # are available to expend in order to satisfy conditions such as \"run B every two times A runs\"\n # specifically, counts_useable[a][b] = n indicates that there are n uses of a that are available for b to expend\n # so, in the previous example B would check to see if counts_useable[A][B] >= 2, in which case B can run\n # then, counts_useable[a][b] would be reset to 0, even if it was greater than 2\n if execution_id not in self.counts_useable:\n self.counts_useable[execution_id] = {}\n\n if base_execution_id is not None:\n if base_execution_id not in self.counts_useable:\n raise SchedulerError('execution_id {0} not in {1}.counts_useable'.format(base_execution_id, self))\n\n self.counts_useable[execution_id] = {\n node: {n: self.counts_useable[base_execution_id][node][n] for n in self.nodes} for node in self.nodes\n }\n else:\n self.counts_useable[execution_id] = {\n node: {n: 0 for n in self.nodes} for node in self.nodes\n }\n\n if execution_id not in self.execution_list:\n if base_execution_id is not None:\n if base_execution_id not in self.execution_list:\n raise SchedulerError('execution_id {0} not in {1}.execution_list'.format(base_execution_id, self))\n\n self.execution_list[execution_id] = list(self.execution_list[base_execution_id])\n else:\n self.execution_list[execution_id] = []\n\n self._init_clock(execution_id, base_execution_id)", "async def get_tileable_ref_counts(self, tileable_keys: List[str]) -> List[int]:\n return await self._lifecycle_tracker_ref.get_tileable_ref_counts(tileable_keys)", "async def counter(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(f\"run {ctx.prefix}help counter\")", "def status():\n for site, site_config in config.sites.items():\n group_name = get_group_name(site, site_config)\n for process_name, process_cmd in site_config.processes.items():\n run('sudo supervisorctl status %s:%s' % (group_name, process_name))", "def run(self, state, data=None, context=None, auth=None):\n\n self.fill_configuration(state, data, PARAMS)\n\n for host in self.conf[state]['hosts']:\n\n command = 'sosreport --batch --name %s' % host\n\n if 'onlyplugins' in self.conf[state]:\n command += ' -o %s' % ','.join(self.conf[state]['onlyplugins'])\n else:\n if 'enableplugins' in self.conf[state]:\n command += ' -e %s' % ','.join(\n self.conf[state]['enableplugins']\n )\n if 'noplugins' in self.conf[state]:\n command += ' -n %s' % ','.join(\n self.conf[state]['noplugins']\n )\n\n playbook = \"\"\"\n- hosts: %s\n vars:\n dci_status: %s\n dci_comment: 'Collecting sosreport for %s'\n dci_log_prefix: 'sosreport'\n tasks:\n - name: Install sos package\n package:\n name: sos\n\n - name: Run sosreport\n shell: %s\n register: sosreport_output\n\n - name: Upload sosreport\n dci_upload:\n file: \"{{ sosreport_output.stdout_lines[-5].strip() }}\"\n dci_login: %s\n dci_password: %s\n dci_cs_url: 
%s\n dci_status: %s\n job_id: %s\n\"\"\" % (host, state, host, command, context.login, auth['dci_password'], context.dci_cs_api.replace('/api/v1', ''), state, context.last_job_id) # noqa\n\n return self.run_playbook(playbook, context)", "def totals(self, session, host=None):\n if host:\n s = Search(using=self.client).query(\"match_phrase\", session=session) \\\n .filter(\"term\", ansible_type=\"task\") \\\n .filter(\"term\", ansible_host=host)\n else:\n s = Search(using=self.client).query(\"match_phrase\", session=session) \\\n .filter(\"term\", ansible_type=\"task\")\n tasks = s.scan()\n tasks = [task.to_dict() for task in tasks]\n totals = {\n \"OK\": 0,\n \"FAILED\": 0,\n \"UNREACHABLE\": 0,\n \"CHANGED\": 0,\n \"SKIPPED\": 0,\n }\n for task in tasks:\n result = task['status']\n if result == 'OK':\n # check if it was a change\n if json.loads(task['ansible_result'])['changed'] == True:\n result = 'CHANGED'\n totals[result] += 1\n return totals", "def check_counter_all(management, args):\n global logger\n\n logger.debug('Retrieving all hosted services')\n hosted_services = management.list_hosted_services()\n error_code_all = 0\n errors = []\n if not hosted_services:\n error_code_all = 2\n errors.append('No hosted services found')\n for service in hosted_services:\n logger.debug('Checking counter in '+service.service_name)\n error_code, error = check_counter(management, \n service.service_name, args)\n errors.append(' '.join(('{0}:'.format(service.service_name), error)))\n error_code_all = max (error_code_all, error_code)\n return error_code_all, '; '.join(errors)", "async def number_update_servers(self) -> int:\n\t\tself._logger.debug(\"Getting number update servers\")\n\t\tquery = \"SELECT Count(*) FROM servers WHERE output_channel IS NOT NULL\"\n\n\t\tasync with self.pool.acquire() as conn:\n\t\t\tasync with conn.transaction():\n\t\t\t\tres = await conn.fetchval(query)\n\n\t\t\t\treturn res", "async def socketstats(self, ctx: commands.Context):\n delta = ctx.message.created_at - self.bot._start_time\n minutes = delta.total_seconds() / 60\n total = sum(self.bot.socket_stats.values())\n cpm = total / minutes\n socket_stats = \"\\n\".join(\n f\"{name}: {count}\" for name, count in self.bot.socket_stats.items())\n await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\\n```\\n{socket_stats}\\n```')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Carries out the event of the current next_active_node, and returns the next next_active_node
def event_and_return_nextnode(simself, next_active_node):
    next_active_node.have_event()
    for node in simself.transitive_nodes:
        node.update_next_event_date()
        self.assertEqual(
            node.number_of_individuals, len(node.all_individuals))
    return simself.find_next_active_node()
[ "def next_node(self):\n self.current_idx += 1\n return self.suggested_node()", "def get_next(self):\r\n return self.next_node", "def next_node(self):\n return self.suggested_node()", "def next(self):\n if self.is_complete():\n return None\n return self.tree.children[self.dot]", "def nextVisibleNodeOf(self, node):\n if node.parent is None:\n idx = self.roots.index(node)\n if idx == len(self.roots) -1: # last root\n return node\n else:\n return self.roots[idx+1]\n else:\n children = node.parent.children\n idx = children.index(node)\n if idx is len(children)-1:\n return self.nextVisibleNodeOf( node.parent ) \n else:\n return children[idx + 1 ]\n\n \n if len(node.children ) == 0 or not node.expanded:\n return node\n return self.lastVisibleNodeOf(node.children[-1])", "def get_next_node(self, previous):\n try:\n return dereference_pointer(get_pointer(previous) ^ self.both)\n except KeyError:\n return None", "def next(self) -> Optional[BaseInvocation]:\n\n # TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes\n # possibly with a timeout?\n\n # If there are no prepared nodes, prepare some nodes\n next_node = self._get_next_node()\n if next_node is None:\n prepared_id = self._prepare()\n\n # Prepare as many nodes as we can\n while prepared_id is not None:\n prepared_id = self._prepare()\n next_node = self._get_next_node()\n\n # Get values from edges\n if next_node is not None:\n self._prepare_inputs(next_node)\n\n # If next is still none, there's no next node, return None\n return next_node", "def nextNode(self):\n # var node, result, following;\n node = self.currentNode\n\n if isinstance(node, str):\n node = Text(node)\n # return node\n\n result = NodeFilter.FILTER_ACCEPT\n while True:\n # print('rrr:::', result, node)\n if isinstance(node, str):\n Text(node)\n # continue\n\n while result != NodeFilter.FILTER_REJECT and node.firstChild != None:\n # print('rrr222:::', result, node)\n node = node.firstChild\n if isinstance(node, str):\n node = Text(node)\n # result = NodeFilter.FILTER_REJECT\n # continue\n # break\n # return None\n\n result = nodeFilter(self, node)\n if result == NodeFilter.FILTER_ACCEPT:\n self.currentNode = node\n return node\n following = nextSkippingChildren(node, self.root)\n if following != None:\n node = following\n else:\n # print('NONE')\n return None\n result = nodeFilter(self, node)\n if result == NodeFilter.FILTER_ACCEPT:\n self.currentNode = node\n return node", "def nextInterarrival(self):\r\n return self.interarrivalFcn()", "def nextPrevNode(self, node, forward=True):\n for row in range(self.count()):\n if self.item(row).node == node:\n if forward:\n row += 1\n if row >= self.count():\n row = 0\n else:\n row -= 1\n if row < 0:\n row = self.count() - 1\n return self.item(row).node\n return None", "def _get_next_node(self) -> Optional[BaseInvocation]:\n g = self.execution_graph.nx_graph()\n\n # Depth-first search with pre-order traversal is a depth-first topological sort\n sorted_nodes = nx.dfs_preorder_nodes(g)\n\n next_node = next(\n (\n n\n for n in sorted_nodes\n if n not in self.executed # the node must not already be executed...\n and all((e[0] in self.executed for e in g.in_edges(n))) # ...and all its inputs must be executed\n ),\n None,\n )\n\n if next_node is None:\n return None\n\n return self.execution_graph.nodes[next_node]", "def get_next_nodes(self, n):\n return # osid.hierarchy.Node", "def next_node(open_nodes):\n open_nodes.sort(key=lambda entry: (entry.h_value + entry.g_value, entry.h_value), reverse=True)\n 
open_nodes[-1].visited = True\n return open_nodes.pop()", "def set_next_node(self):\n if not self.current_pq:\n # if priority queue is empty, set to none\n self.current=None\n self.state = np.zeros(LundCoordinates.dimension)\n else:\n # first get the tree node of branch with largest delta R separation\n self.current = hq.heappop(self.current_pq)\n # then set up the internal state to current values\n self.state = self.current.state()", "def get_next_bank_node(self):\n return # osid.assessment.BankNode", "def _get_node(self):\n next_node = self._current_node % len(self._cluster)\n self._current_node += 1\n return self._cluster[next_node]", "def get_next(current):\n for index,value in enumerate(STATES):\n if value == current:\n if index == len(STATES)-1:\n return STATES[0]\n else:\n return STATES[index+1]", "def nextSibling(self):\n # return self.currentNode.nextSibling\n return traverseSiblings(self, 'next')", "def new_active(endTriplet, activeObstacle):\n current = endTriplet\n while current.parent.obstacles[2] != activeObstacle:\n current = current.parent\n return current", "def get_next_sibling(self):\n next = super(BaseLesson, self).get_next_sibling()\n try:\n if next.course_id == self.course_id:\n return next\n else:\n return None\n except:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function when we prioritise the server that was less busy throughout the simulation.
def test_server_priority_function_allocate_to_less_busy(self):
    def get_server_busy_time(server, ind):
        return server.busy_time

    ciw.seed(0)
    Q = ciw.Simulation(ciw.create_network(
        arrival_distributions=[ciw.dists.Exponential(1)],
        service_distributions=[ciw.dists.Exponential(2)],
        number_of_servers=[2],
        server_priority_functions=[get_server_busy_time]
        )
    )
    Q.simulate_until_max_time(1000)

    expected_times = [245.07547532640024, 244.68396417751663]
    for i, srv in enumerate(Q.nodes[1].servers):
        self.assertEqual(srv.busy_time, expected_times[i])
[ "def test_server_priority_function_allocate_to_last_server_first(self):\n def get_server_busy_time(server, ind):\n return -server.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [158.68745586286119, 331.0719836410557]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_server_priority_function_two_nodes(self):\n def prioritise_less_busy(srv, ind):\n return srv.busy_time\n\n def prioritise_highest_id(srv, ind):\n return -srv.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1), ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2), ciw.dists.Exponential(2)],\n number_of_servers=[2, 2],\n routing=[[0, 0], [0, 0]],\n server_priority_functions=[prioritise_less_busy, prioritise_highest_id]\n )\n )\n Q.simulate_until_max_time(1000)\n expected_times_node_1 = [256.2457715650031, 257.59339967047254]\n expected_times_node_2 = [157.35577182806387, 356.41473247082365]\n\n for i, (srv_1, srv_2) in enumerate(zip(Q.nodes[1].servers, Q.nodes[2].servers)):\n self.assertEqual(srv_1.busy_time, expected_times_node_1[i])\n self.assertEqual(srv_2.busy_time, expected_times_node_2[i])", "def test_highest_spm_priority_host_non_responsive(self):\n new_priority = range(1, len(self.hsm_hosts) + 1)\n self.set_priorities(priorities=new_priority, hosts=self.hsm_hosts)\n rhevm_helpers.maintenance_and_activate_hosts(\n hosts=[self.spm_host], activate=False\n )\n logger.info(\n \"Blocking connection between %s and %s\",\n self.high_spm_priority_host, config.VDC\n )\n self.former_spm = self.spm_host\n\n assert storage_helpers.setup_iptables(\n self.host_ip, self.engine_ip, block=True\n ), \"Unable to block connection between %s and %s\" % (\n self.high_spm_priority_host, config.VDC\n )\n self.wait_for_spm_host_and_verify_identity(\n self.low_spm_priority_host\n )", "def testPriority(self):\n element = WorkQueueElement(RequestName='backend_test',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n highprielement = WorkQueueElement(RequestName='backend_test_high',\n WMSpec=self.processingSpec,\n Status='Available', Jobs=10,\n SiteWhitelist=[\"place\"],\n Priority=100)\n element2 = WorkQueueElement(RequestName='backend_test_2',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n element3 = WorkQueueElement(RequestName='backend_test_3',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n lowprielement = WorkQueueElement(RequestName='backend_test_low',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=0.1)\n self.backend.insertElements([element])\n self.backend.availableWork({'place': 1000}, {})\n # timestamp in elements have second coarseness, 2nd element must\n # have a higher timestamp to force it after the 1st\n time.sleep(1)\n self.backend.insertElements([lowprielement, element2, highprielement])\n self.backend.availableWork({'place': 1000}, {})\n time.sleep(1)\n self.backend.insertElements([element3])\n work = self.backend.availableWork({'place': 1000}, {})\n # order should be high to low, with the standard 
elements in the order\n # they were queueud\n self.assertEqual([x['RequestName'] for x in work[0]],\n ['backend_test_high', 'backend_test', 'backend_test_2',\n 'backend_test_3', 'backend_test_low'])", "def test_qos_bw_limit(self):\n self.server.wait_for_iperf3_server()\n # localhost will act as client\n bandwidth_limit = self.policy.bwlimit_kbps * 1000.\n for attempt in tobiko.retry(timeout=100., interval=5.):\n try:\n iperf3.assert_has_bandwith_limits(\n address=self.server.ip_address,\n min_bandwith=bandwidth_limit * 0.9,\n max_bandwith=bandwidth_limit * 1.1,\n port=self.server.iperf3_port,\n download=True)\n break\n except sh.ShellCommandFailed as err:\n if ('unable to connect to server: Connection refused'\n in str(err)):\n attempt.check_limits()\n LOG.debug('iperf command failed because the iperf server '\n 'was not ready yet - retrying...')\n else:\n raise err", "def custom_server_priority(srv, ind):\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]", "def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 1)\n self.assertEqual(float(recs[3].waiting_time), 1.5)\n self.assertEqual(float(recs[4].waiting_time), 2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 7)\n self.assertEqual(float(recs[3].service_start_date), 9.5)\n self.assertEqual(float(recs[4].service_start_date), 12)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 9.5)\n self.assertEqual(float(recs[3].service_end_date), 12)\n self.assertEqual(float(recs[4].service_end_date), 14.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: 
r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 5.7)\n self.assertEqual(float(recs[3].waiting_time), 1.2)\n self.assertEqual(float(recs[4].waiting_time), 4.2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 11.7)\n self.assertEqual(float(recs[3].service_start_date), 9.2)\n self.assertEqual(float(recs[4].service_start_date), 14.2)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 14.2)\n self.assertEqual(float(recs[3].service_end_date), 11.7)\n self.assertEqual(float(recs[4].service_end_date), 16.7)\n\n # Test interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(float(interrupted_recs[0].arrival_date), 6)\n self.assertEqual(float(interrupted_recs[0].service_start_date), 7)\n self.assertEqual(float(interrupted_recs[0].waiting_time), 1)\n self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)\n self.assertEqual(float(interrupted_recs[0].service_time), 2.5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))", "def test_non_primary_accepts_pre_prepare_time(looper, txnPoolNodeSet,\n sdk_wallet_client, sdk_pool_handle):\n sdk_send_random_and_check(looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n count=2)\n # send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 2)\n # The replica having the bad clock\n confused_npr = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1]\n\n make_clock_faulty(confused_npr.node)\n\n old_acceptable_rvs = getAllReturnVals(\n confused_npr._ordering_service, confused_npr._ordering_service._is_pre_prepare_time_acceptable)\n old_susp_count = get_timestamp_suspicion_count(confused_npr.node)\n sdk_send_random_and_check(looper,\n txnPoolNodeSet,\n sdk_pool_handle,\n sdk_wallet_client,\n count=2)\n\n assert get_timestamp_suspicion_count(confused_npr.node) > old_susp_count\n\n new_acceptable_rvs = getAllReturnVals(\n confused_npr._ordering_service, confused_npr._ordering_service._is_pre_prepare_time_acceptable)\n\n # `is_pre_prepare_time_acceptable` first returned False then returned True\n assert [True, False, *old_acceptable_rvs] == new_acceptable_rvs", "def testPriority(self):\r\n my_brain = TestBrain._emptyBrain()\r\n hn_module = 'modules.HN'\r\n hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0]\r\n\r\n with patch.object(hn, 'handle') as mocked_handle:\r\n my_brain.query(\"hacker news\")\r\n self.assertTrue(mocked_handle.called)", "def _set_server_status_spare(server, update_only):\n allowed_status = [\n _server.MySQLServer.SECONDARY, _server.MySQLServer.FAULTY\n ]\n status = _server.MySQLServer.SPARE\n mode = _server.MySQLServer.OFFLINE\n previous_status = server.status\n _do_set_status(server, allowed_status, status, mode, update_only)\n\n if previous_status == _server.MySQLServer.FAULTY:\n # Check whether the server is really alive or not.\n _check_requirements(server)\n\n # Configure 
replication\n if not update_only:\n group = _server.Group.fetch(server.group_id)\n _configure_as_slave(group, server)", "def testServerWeightInt(self):\n proto = 'http'\n self.driver.add_protocol(proto, None)\n self.driver.add_server(proto, 100, '1.2.3.4', 7777, 10)\n servers = self.driver._config[proto]['servers']\n self.assertEqual(len(servers), 1)\n self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 10, False))", "def test_preboost_server(self):\n artifact_id = self.my_create_appliance(\"testpreboost\")\n s.touch_to_state(None, artifact_id, \"Preparing\")\n status = s.check_state(artifact_id)\n self.assertEqual(status, \"Preparing\")", "def test_global_update_propogation_switchover(self):\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"DROP DATABASE IF EXISTS global_db\")\n global_master.exec_stmt(\"CREATE DATABASE global_db\")\n global_master.exec_stmt(\"CREATE TABLE global_db.global_table\"\n \"(userID INT, name VARCHAR(30))\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(101, 'TEST 1')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(202, 'TEST 2')\")\n\n status = self.proxy.group.promote(\"GROUPID1\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(303, 'TEST 3')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(404, 'TEST 4')\")\n\n status = self.proxy.group.promote(\"GROUPID2\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 5')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 6')\")\n\n status = self.proxy.group.promote(\"GROUPID3\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 7')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 8')\")\n\n sleep(5)\n\n status = self.proxy.sharding.lookup_servers(\"db1.t1\", 500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", {\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')\n\n status = self.proxy.sharding.lookup_servers(\"db1.t1\", 1500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", 
{\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')", "def test_server_status(self):\n self.assert_(False)", "def testThresholdPriority(self):\n\n myResourceControl = ResourceControl()\n myResourceControl.insertSite(\"testSite1\", 20, 40, \"testSE1\", \"testCE1\")\n myResourceControl.insertThreshold(\"testSite1\", \"Processing\", 10, 8)\n myResourceControl.insertThreshold(\"testSite1\", \"Merge\", 5, 3)\n\n # test default task priorities\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 4)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 0)\n\n myResourceControl.changeTaskPriority(\"Merge\", 3)\n myResourceControl.changeTaskPriority(\"Processing\", 1)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 3)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 1)\n\n myResourceControl.changeTaskPriority(\"Merge\", 1)\n myResourceControl.changeTaskPriority(\"Processing\", 3)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 1)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 3)\n\n return", "def testRequestResourcesRaceConditionWithMinWorker(self):\n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"available_node_types\"] = {\n \"empty_node\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2},\n \"max_workers\": 1,\n },\n \"def_worker\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"WORKER\": 1},\n \"max_workers\": 3,\n \"min_workers\": 1,\n },\n }\n config_path = self.write_config(config)\n self.provider = MockProvider()\n runner = MockProcessRunner()\n runner.respond_to_call(\"json .Config.Env\", [\"[]\" for i in range(2)])\n self.provider.create_node(\n {},\n {\n TAG_RAY_NODE_KIND: NODE_KIND_HEAD,\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_USER_NODE_TYPE: \"empty_node\",\n },\n 1,\n )\n lm = LoadMetrics()\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockGcsClient(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 2, \"WORKER\": 1.0}] * 2)\n autoscaler.update()\n # 2 min worker for both min_worker and request_resources(), not 3.\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})", "def find_best_server(self):\n pass", "def testRequestResourcesRaceConditionsLong(self):\n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"max_workers\"] = 4\n config[\"idle_timeout_minutes\"] = 0\n config[\"available_node_types\"] = {\n \"empty_node\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2},\n \"max_workers\": 1,\n },\n \"def_worker\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"WORKER\": 1},\n \"max_workers\": 3,\n \"min_workers\": 1,\n },\n }\n config_path = self.write_config(config)\n self.provider = MockProvider()\n runner = MockProcessRunner()\n runner.respond_to_call(\"json .Config.Env\", [\"[]\" for i in range(3)])\n self.provider.create_node(\n {},\n {\n TAG_RAY_NODE_KIND: 
NODE_KIND_HEAD,\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_USER_NODE_TYPE: \"empty_node\",\n },\n 1,\n )\n lm = LoadMetrics()\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockGcsClient(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}])\n autoscaler.update()\n # 1 min worker for both min_worker and request_resources()\n self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n non_terminated_nodes = autoscaler.provider.non_terminated_nodes({})\n assert len(non_terminated_nodes) == 2\n node_id = non_terminated_nodes[1]\n node_ip = autoscaler.provider.non_terminated_node_ips({})[1]\n\n # A hack to check if the node was terminated when it shouldn't.\n autoscaler.provider.mock_nodes[node_id].state = \"unterminatable\"\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n waiting_bundles=[{\"CPU\": 0.2, \"WORKER\": 1.0}],\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}] * 2)\n autoscaler.update()\n # 2 requested_resource, 1 min worker, 1 free node -> 2 nodes total\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}])\n autoscaler.update()\n # Still 2 because the second one is not connected and hence\n # request_resources occupies the connected node.\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}] * 3)\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n {},\n waiting_bundles=[{\"CPU\": 0.2, \"WORKER\": 1.0}] * 3,\n )\n autoscaler.update()\n self.waitForNodes(3, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([])\n\n lm.update(\n \"172.0.0.2\",\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n )\n lm.update(\n \"172.0.0.3\",\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n )\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n {},\n )\n print(\"============ Should scale down from here =============\", node_id)\n autoscaler.update()\n self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n # If node {node_id} was terminated any time then it's state will be set\n # to terminated.\n assert autoscaler.provider.mock_nodes[node_id].state == \"unterminatable\"", "def test_all_hosts_with_minus_one_spm_priority(self):\n min_priorities = [config.MIN_SPM_PRIORITY]\n self.basic_flow(priorities=min_priorities, hosts=[self.spm_host])\n\n testflow.step(\"Restarting vdsmd on %s\", self.spm_host)\n spm_host_ip = ll_hosts.get_host_ip(self.spm_host)\n test_utils.restartVdsmd(spm_host_ip, config.HOSTS_PW)\n assert ll_hosts.wait_for_hosts_states(\n True, self.spm_host, config.HOST_UP\n ), \"Host %s failed to reach 'UP' state\" % self.spm_host\n\n testflow.step(\"Waiting for SPM to be elected\")\n with pytest.raises(apis_exceptions.APITimeout):\n ll_hosts.wait_for_spm(\n 
datacenter=config.DATA_CENTER_NAME,\n timeout=WAIT_FOR_SPM_TIMEOUT, sleep=RETRY_INTERVAL\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function when we prioritise the server with the highest id number.
def test_server_priority_function_allocate_to_last_server_first(self): def get_server_busy_time(server, ind): return -server.id_number ciw.seed(0) Q = ciw.Simulation(ciw.create_network( arrival_distributions=[ciw.dists.Exponential(1)], service_distributions=[ciw.dists.Exponential(2)], number_of_servers=[2], server_priority_functions=[get_server_busy_time] ) ) Q.simulate_until_max_time(1000) expected_times = [158.68745586286119, 331.0719836410557] for i, srv in enumerate(Q.nodes[1].servers): self.assertEqual(srv.busy_time, expected_times[i])
[ "def custom_server_priority(srv, ind):\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]", "def test_server_priority_function_two_nodes(self):\n def prioritise_less_busy(srv, ind):\n return srv.busy_time\n\n def prioritise_highest_id(srv, ind):\n return -srv.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1), ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2), ciw.dists.Exponential(2)],\n number_of_servers=[2, 2],\n routing=[[0, 0], [0, 0]],\n server_priority_functions=[prioritise_less_busy, prioritise_highest_id]\n )\n )\n Q.simulate_until_max_time(1000)\n expected_times_node_1 = [256.2457715650031, 257.59339967047254]\n expected_times_node_2 = [157.35577182806387, 356.41473247082365]\n\n for i, (srv_1, srv_2) in enumerate(zip(Q.nodes[1].servers, Q.nodes[2].servers)):\n self.assertEqual(srv_1.busy_time, expected_times_node_1[i])\n self.assertEqual(srv_2.busy_time, expected_times_node_2[i])", "def test_highest_spm_priority_host_non_responsive(self):\n new_priority = range(1, len(self.hsm_hosts) + 1)\n self.set_priorities(priorities=new_priority, hosts=self.hsm_hosts)\n rhevm_helpers.maintenance_and_activate_hosts(\n hosts=[self.spm_host], activate=False\n )\n logger.info(\n \"Blocking connection between %s and %s\",\n self.high_spm_priority_host, config.VDC\n )\n self.former_spm = self.spm_host\n\n assert storage_helpers.setup_iptables(\n self.host_ip, self.engine_ip, block=True\n ), \"Unable to block connection between %s and %s\" % (\n self.high_spm_priority_host, config.VDC\n )\n self.wait_for_spm_host_and_verify_identity(\n self.low_spm_priority_host\n )", "def find_best_server(self):\n pass", "def test_server_priority_function_allocate_to_less_busy(self):\n def get_server_busy_time(server, ind):\n return server.busy_time\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [245.07547532640024, 244.68396417751663]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_records_correct_server_id(self):\n def custom_server_priority(srv, ind):\n \"\"\"\n A custom server priority function that priortises server 1 for \n customer class 0 and server 2 for customer class 1.\n \"\"\"\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]\n\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=1.0)], 'Class 1': [ciw.dists.Exponential(rate=1.0)]\n },\n service_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=200.0)], 'Class 1': [ciw.dists.Exponential(rate=200.0)]\n },\n number_of_servers=[2],\n server_priority_functions=[custom_server_priority],\n )\n ciw.seed(0)\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(50)\n\n all_class_0_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n all_class_1_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n\n self.assertTrue(all_class_0_correct)\n 
self.assertTrue(all_class_1_correct)", "def _set_server_status_primary(server, update_only):\n raise _errors.ServerError(\n \"If you want to make a server (%s) primary, please, use the \"\n \"group.promote function.\" % (server.uuid, )\n )", "def set_priority(self, priority_id, pid):\n try:\n if pid in self.__processes:\n process = psutil.Process(pid)\n #since only superuser can decrease nice.\n priority_id = priority_id if process.nice() <= priority_id else process.nice()\n process.nice(priority_id)\n return \"OK\"\n else:\n return \"That process is not yours\"\n except psutil.NoSuchProcess as e:\n return f\"Error: {e}\"", "def testServerWeightInt(self):\n proto = 'http'\n self.driver.add_protocol(proto, None)\n self.driver.add_server(proto, 100, '1.2.3.4', 7777, 10)\n servers = self.driver._config[proto]['servers']\n self.assertEqual(len(servers), 1)\n self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 10, False))", "def test_db_illegal_spm_priority_value(self):\n testflow.step(\n \"Change SPM priority to %s in the DB to %s\", self.spm_host,\n config.LARGER_THAN_MAX_SPM_PRIORITY\n )\n status = False\n try:\n ll_hosts.set_spm_priority_in_db(\n host_name=self.spm_host,\n spm_priority=config.LARGER_THAN_MAX_SPM_PRIORITY,\n engine=config.ENGINE\n ), \"SPM priority on the DB for host '%s' changed to '%s'\" % (\n self.spm_host, config.LARGER_THAN_MAX_SPM_PRIORITY\n )\n # Exception is raised from engine.db.psql in rrmngmt\n except Exception:\n status = True\n assert status, (\n \"SPM priority on the DB for host '%s' changed to '%s'\" % (\n self.spm_host, config.MIN_SPM_PRIORITY + 1\n )\n )\n testflow.step(\n \"Change SPM priority to %s in the DB to %s\", self.spm_host,\n config.BELOW_MIN_SPM_PRIORITY\n )\n status = False\n try:\n ll_hosts.set_spm_priority_in_db(\n host_name=self.spm_host,\n spm_priority=config.BELOW_MIN_SPM_PRIORITY,\n engine=config.ENGINE\n )\n except Exception:\n status = True\n assert status, (\n \"SPM priority on the DB for host '%s' changed to '%s'\" % (\n self.spm_host, config.BELOW_MIN_SPM_PRIORITY\n )\n )", "def find_highest_priority(self):\n\t\tif self.RL.system.list:\n\t\t\treturn self.RL.system.list[0]\n\t\telif self.RL.user.list:\n\t\t\treturn self.RL.user.list[0]\n\t\telse:\n\t\t\treturn self.RL.init.list[0]", "def testPriority(self):\n element = WorkQueueElement(RequestName='backend_test',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n highprielement = WorkQueueElement(RequestName='backend_test_high',\n WMSpec=self.processingSpec,\n Status='Available', Jobs=10,\n SiteWhitelist=[\"place\"],\n Priority=100)\n element2 = WorkQueueElement(RequestName='backend_test_2',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n element3 = WorkQueueElement(RequestName='backend_test_3',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n lowprielement = WorkQueueElement(RequestName='backend_test_low',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=0.1)\n self.backend.insertElements([element])\n self.backend.availableWork({'place': 1000}, {})\n # timestamp in elements have second coarseness, 2nd element must\n # have a higher timestamp to force it after the 1st\n time.sleep(1)\n self.backend.insertElements([lowprielement, element2, highprielement])\n self.backend.availableWork({'place': 1000}, {})\n time.sleep(1)\n self.backend.insertElements([element3])\n work 
= self.backend.availableWork({'place': 1000}, {})\n # order should be high to low, with the standard elements in the order\n # they were queueud\n self.assertEqual([x['RequestName'] for x in work[0]],\n ['backend_test_high', 'backend_test', 'backend_test_2',\n 'backend_test_3', 'backend_test_low'])", "def get_highest_preference(self, routes):\n # TODO\n outroutes = []\n highest = routes[0]\n tied = []\n for r in routes:\n pref = r.localpref\n if pref > highest.localpref:\n highest = r\n tied = []\n elif pref == highest.localpref and r != highest:\n tied.append(r)\n outroutes = tied\n outroutes.append(highest)\n for r in outroutes:\n print(\"networks: \" + r.ntwork)\n print(\"next func\")\n return outroutes", "def set_max_process_priority():\n # children processes inherit niceness from father\n try:\n LOG.warning(\n 'Setting freezer execution with high CPU and I/O priority')\n pid = os.getpid()\n # Set cpu priority\n os.nice(-19)\n # Set I/O Priority to Real Time class with level 0\n subprocess.call([\n u'{0}'.format(find_executable(\"ionice\")),\n u'-c', u'1', u'-n', u'0', u'-t',\n u'-p', u'{0}'.format(pid)\n ])\n except Exception as priority_error:\n LOG.warning('Priority: {0}'.format(priority_error))", "def testPriority(self):\r\n my_brain = TestBrain._emptyBrain()\r\n hn_module = 'modules.HN'\r\n hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0]\r\n\r\n with patch.object(hn, 'handle') as mocked_handle:\r\n my_brain.query(\"hacker news\")\r\n self.assertTrue(mocked_handle.called)", "def test_server_qos_policy_id(self):\n self.assertIsNone(self.server.port_details['qos_policy_id'])", "def test_global_update_propogation_switchover(self):\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"DROP DATABASE IF EXISTS global_db\")\n global_master.exec_stmt(\"CREATE DATABASE global_db\")\n global_master.exec_stmt(\"CREATE TABLE global_db.global_table\"\n \"(userID INT, name VARCHAR(30))\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(101, 'TEST 1')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(202, 'TEST 2')\")\n\n status = self.proxy.group.promote(\"GROUPID1\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(303, 'TEST 3')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(404, 'TEST 4')\")\n\n status = self.proxy.group.promote(\"GROUPID2\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 5')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 6')\")\n\n status = self.proxy.group.promote(\"GROUPID3\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 7')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 8')\")\n\n sleep(5)\n\n status = 
self.proxy.sharding.lookup_servers(\"db1.t1\", 500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", {\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')\n\n status = self.proxy.sharding.lookup_servers(\"db1.t1\", 1500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", {\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')", "def test_priority_change_ok(self):\n self.execute('priority change major normal')\n rv, output = self.execute('priority list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def test_cmp_by_priority_prefer_installed_replace(self):\n def _assert_sort_by_priority(package, r_sorted_packages):\n remote_repository = Repository(packages)\n pool = Pool([remote_repository])\n policy = DefaultPolicy()\n\n # We reverse the list to ensure queue is not originally in the\n # final order\n queue = reversed(packages)\n def _cmp(a, b):\n return policy.cmp_by_priority_prefer_installed(pool, {}, a, b)\n\n self.assertEqual(r_sorted_packages, sorted_with_cmp(queue, cmp=_cmp))\n\n scikits_0_12_0 = P(\"scikits_learn-0.12.0\")\n sklearn_0_13_0 = P(\"sklearn-0.13.0\")\n packages = [sklearn_0_13_0, scikits_0_12_0]\n r_sorted_packages = [sklearn_0_13_0, scikits_0_12_0]\n\n _assert_sort_by_priority(packages, r_sorted_packages)\n\n scikits_0_12_0 = P(\"scikits_learn-0.12.0\")\n sklearn_0_13_0 = P(\"sklearn-0.13.0; replaces (scikits_learn < 0.13.0)\")\n packages = [sklearn_0_13_0, scikits_0_12_0]\n r_sorted_packages = [scikits_0_12_0, sklearn_0_13_0]\n\n _assert_sort_by_priority(packages, r_sorted_packages)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function with two nodes, each with a different priority rule.
def test_server_priority_function_two_nodes(self): def prioritise_less_busy(srv, ind): return srv.busy_time def prioritise_highest_id(srv, ind): return -srv.id_number ciw.seed(0) Q = ciw.Simulation(ciw.create_network( arrival_distributions=[ciw.dists.Exponential(1), ciw.dists.Exponential(1)], service_distributions=[ciw.dists.Exponential(2), ciw.dists.Exponential(2)], number_of_servers=[2, 2], routing=[[0, 0], [0, 0]], server_priority_functions=[prioritise_less_busy, prioritise_highest_id] ) ) Q.simulate_until_max_time(1000) expected_times_node_1 = [256.2457715650031, 257.59339967047254] expected_times_node_2 = [157.35577182806387, 356.41473247082365] for i, (srv_1, srv_2) in enumerate(zip(Q.nodes[1].servers, Q.nodes[2].servers)): self.assertEqual(srv_1.busy_time, expected_times_node_1[i]) self.assertEqual(srv_2.busy_time, expected_times_node_2[i])
[ "def test_server_priority_function_allocate_to_less_busy(self):\n def get_server_busy_time(server, ind):\n return server.busy_time\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [245.07547532640024, 244.68396417751663]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_server_priority_function_allocate_to_last_server_first(self):\n def get_server_busy_time(server, ind):\n return -server.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [158.68745586286119, 331.0719836410557]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_two_hosts_swap_priorities(self):\n self.basic_flow()\n testflow.step(\n \"Swapping SPM priorities between host %s and %s\",\n self.high_spm_priority_host, self.low_spm_priority_host\n )\n self.high_spm_priority_host, self.low_spm_priority_host = (\n self.low_spm_priority_host, self.high_spm_priority_host\n )\n self.hosts = [self.high_spm_priority_host, self.low_spm_priority_host]\n self.basic_flow()", "def custom_server_priority(srv, ind):\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]", "def testThresholdPriority(self):\n\n myResourceControl = ResourceControl()\n myResourceControl.insertSite(\"testSite1\", 20, 40, \"testSE1\", \"testCE1\")\n myResourceControl.insertThreshold(\"testSite1\", \"Processing\", 10, 8)\n myResourceControl.insertThreshold(\"testSite1\", \"Merge\", 5, 3)\n\n # test default task priorities\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 4)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 0)\n\n myResourceControl.changeTaskPriority(\"Merge\", 3)\n myResourceControl.changeTaskPriority(\"Processing\", 1)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 3)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 1)\n\n myResourceControl.changeTaskPriority(\"Merge\", 1)\n myResourceControl.changeTaskPriority(\"Processing\", 3)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 1)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 3)\n\n return", "def thread_cmp(x, y):\n return cmp(y.get_priority(), x.get_priority())", "def testPriority(self):\n element = WorkQueueElement(RequestName='backend_test',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n highprielement = WorkQueueElement(RequestName='backend_test_high',\n WMSpec=self.processingSpec,\n Status='Available', Jobs=10,\n SiteWhitelist=[\"place\"],\n Priority=100)\n element2 = WorkQueueElement(RequestName='backend_test_2',\n WMSpec=self.processingSpec,\n 
Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n element3 = WorkQueueElement(RequestName='backend_test_3',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n lowprielement = WorkQueueElement(RequestName='backend_test_low',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=0.1)\n self.backend.insertElements([element])\n self.backend.availableWork({'place': 1000}, {})\n # timestamp in elements have second coarseness, 2nd element must\n # have a higher timestamp to force it after the 1st\n time.sleep(1)\n self.backend.insertElements([lowprielement, element2, highprielement])\n self.backend.availableWork({'place': 1000}, {})\n time.sleep(1)\n self.backend.insertElements([element3])\n work = self.backend.availableWork({'place': 1000}, {})\n # order should be high to low, with the standard elements in the order\n # they were queueud\n self.assertEqual([x['RequestName'] for x in work[0]],\n ['backend_test_high', 'backend_test', 'backend_test_2',\n 'backend_test_3', 'backend_test_low'])", "def test_cmp_by_priority_prefer_installed_multi_repositories(self):\n numpy_1_6_0 = P(\"numpy-1.6.0\")\n numpy_1_6_1 = P(\"numpy-1.6.1\")\n numpy_1_7_0 = P(\"numpy-1.7.0\")\n i_numpy_1_6_0 = P(\"numpy-1.6.0\")\n\n remote_repository = Repository([numpy_1_7_0, numpy_1_6_1, numpy_1_6_0], \"remote\")\n installed_repository = Repository([i_numpy_1_6_0], \"installed\")\n\n r_sorted_packages = [i_numpy_1_6_0, numpy_1_7_0, numpy_1_6_1, numpy_1_6_0]\n\n pool = Pool([installed_repository, remote_repository])\n pool.set_repository_order(\"installed\", \"remote\")\n policy = DefaultPolicy()\n\n queue = [numpy_1_7_0, i_numpy_1_6_0, numpy_1_6_0, numpy_1_6_1]\n def _cmp(a, b):\n return policy.cmp_by_priority_prefer_installed(pool, {}, a, b)\n\n self.assertEqual(r_sorted_packages, sorted_with_cmp(queue, cmp=_cmp))", "def test_cmp_by_priority_prefer_installed_same_repository_simple(self):\n numpy_1_6_0 = P(\"numpy-1.6.0\")\n numpy_1_6_1 = P(\"numpy-1.6.1\")\n numpy_1_7_0 = P(\"numpy-1.7.0\")\n\n remote_repository = Repository([numpy_1_7_0, numpy_1_6_1, numpy_1_6_0], \"remote\")\n r_sorted_packages = [numpy_1_7_0, numpy_1_6_1, numpy_1_6_0]\n\n pool = Pool([remote_repository])\n policy = DefaultPolicy()\n\n queue = [numpy_1_7_0, numpy_1_6_0, numpy_1_6_1]\n def _cmp(a, b):\n return policy.cmp_by_priority_prefer_installed(pool, {}, a, b)\n\n self.assertEqual(r_sorted_packages, sorted_with_cmp(queue, cmp=_cmp))", "def test_gatewaynodes_on_same_communities_different_network(self):\n sourceNode =self._NODES[0]\n destinationNode = self._NODES[1]\n self._setupNodePair(sourceNode, destinationNode, \n destinationNetworkId=\"Test Source Network\",\n sourceIsGateway =True,\n destinationIsGateway=True )\n \n #populate the node with test data.\n data = json.load(file(_TEST_DATA_PATH))\n sourceNode.publishResourceData(data[\"documents\"])\n self._doDistributeTest(sourceNode, destinationNode)\n assert sourceNode.compareDistributedResources(destinationNode), \\\n \"\"\"Distribute between two gateway nodes on the same community but\n different network and no filter on the destination node.\"\"\"", "def test_ping(topology, step):\n # Setup which shell to use\n shell = 'bash'\n\n hs1 = topology.get('hs1')\n hs2 = topology.get('hs2')\n hs3 = topology.get('hs3')\n\n ping_hs1_to_hs2 = hs1.libs.ping.ping(1, '192.168.15.2', shell=shell)\n ping_hs2_to_hs1 = hs2.libs.ping.ping(1, '192.168.15.1', shell=shell)\n\n assert 
ping_hs1_to_hs2['transmitted'] == ping_hs1_to_hs2['received'] == 1\n assert ping_hs2_to_hs1['transmitted'] == ping_hs2_to_hs1['received'] == 1\n\n # Should not work, host 3 is not in the same subnet as the other 2 hosts\n # We should implement this with ping's communication library once the\n # \"network unreachable\" scenario is supported by uncommenting the following\n # three lines\n # no_ping = hs3.libs.ping.ping(1, '192.168.15.1', shell=shell)\n # assert no_ping['transmitted'] == 1\n # assert no_ping['received'] == 0\n no_ping = hs3('ping -c 1 192.168.15.1', shell=shell)\n assert 'Network is unreachable' in no_ping\n\n # Should not work, not node exists with that ip\n no_ping = hs2.libs.ping.ping(1, '192.168.15.3')\n assert no_ping['transmitted'] == 1\n assert no_ping['received'] == 0", "def test_cmp_by_priority_prefer_installed_replace(self):\n def _assert_sort_by_priority(package, r_sorted_packages):\n remote_repository = Repository(packages)\n pool = Pool([remote_repository])\n policy = DefaultPolicy()\n\n # We reverse the list to ensure queue is not originally in the\n # final order\n queue = reversed(packages)\n def _cmp(a, b):\n return policy.cmp_by_priority_prefer_installed(pool, {}, a, b)\n\n self.assertEqual(r_sorted_packages, sorted_with_cmp(queue, cmp=_cmp))\n\n scikits_0_12_0 = P(\"scikits_learn-0.12.0\")\n sklearn_0_13_0 = P(\"sklearn-0.13.0\")\n packages = [sklearn_0_13_0, scikits_0_12_0]\n r_sorted_packages = [sklearn_0_13_0, scikits_0_12_0]\n\n _assert_sort_by_priority(packages, r_sorted_packages)\n\n scikits_0_12_0 = P(\"scikits_learn-0.12.0\")\n sklearn_0_13_0 = P(\"sklearn-0.13.0; replaces (scikits_learn < 0.13.0)\")\n packages = [sklearn_0_13_0, scikits_0_12_0]\n r_sorted_packages = [scikits_0_12_0, sklearn_0_13_0]\n\n _assert_sort_by_priority(packages, r_sorted_packages)", "def test_common_to_gateway_same_community_network(self):\n \n sourceNode =self._NODES[0]\n destinationNode = self._NODES[1]\n self._setupNodePair(sourceNode, destinationNode, \n destinationIsGateway =True)\n \n #populate the node with test data.\n data = json.load(file(_TEST_DATA_PATH))\n sourceNode.publishResourceData(data[\"documents\"])\n self._doDistributeTest(sourceNode, destinationNode)\n # There should be no replication. 
Destination node should be \n # empty of resource_data docs\n assert sourceNode.compareDistributedResources(destinationNode), \\\n \"\"\"Distribution from a common node to gateway node should work\"\"\"", "def test_priority_change_ok(self):\n self.execute('priority change major normal')\n rv, output = self.execute('priority list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def test_highest_spm_priority_host_non_responsive(self):\n new_priority = range(1, len(self.hsm_hosts) + 1)\n self.set_priorities(priorities=new_priority, hosts=self.hsm_hosts)\n rhevm_helpers.maintenance_and_activate_hosts(\n hosts=[self.spm_host], activate=False\n )\n logger.info(\n \"Blocking connection between %s and %s\",\n self.high_spm_priority_host, config.VDC\n )\n self.former_spm = self.spm_host\n\n assert storage_helpers.setup_iptables(\n self.host_ip, self.engine_ip, block=True\n ), \"Unable to block connection between %s and %s\" % (\n self.high_spm_priority_host, config.VDC\n )\n self.wait_for_spm_host_and_verify_identity(\n self.low_spm_priority_host\n )", "def test_reprioritise_one_student_two_request_other_student_one_request(student1_problem1,student1_problem2,student2_problem1,student1_problem3,student2_problem3):\n end()\n student1,problem1 = student1_problem1\n make_request(student1,problem1)\n help(student1)\n resolve(student1)\n\n student1,problem2 = student1_problem2\n make_request(student1,problem2)\n help(student1)\n resolve(student1)\n\n student2,problem1 = student2_problem1\n make_request(student2,problem1)\n help(student2)\n resolve(student2)\n\n student1,problem3 = student1_problem3\n make_request(student1,problem3)\n student2,problem3 = student2_problem3\n make_request(student2,problem3)\n reprioritise()\n\n # second student has higher priority.\n assert(queue()[0]['zid'] == student2)\n\n end()\n assert not queue()", "def test_priority():\n layout = bs2051.get_layout(\"9+10+3\").without_lfe\n handler = EgoChannelLockHandler(layout)\n\n priority_order = [\n \"M+000\",\n \"M-030\",\n \"M+030\",\n \"M-060\",\n \"M+060\",\n \"M-090\",\n \"M+090\",\n \"M-135\",\n \"M+135\",\n \"M+180\",\n \"B+000\",\n \"B-045\",\n \"B+045\",\n \"U+000\",\n \"U-045\",\n \"U+045\",\n \"U-090\",\n \"U+090\",\n \"U-135\",\n \"U+135\",\n \"U+180\",\n \"T+000\",\n ]\n\n for i, (name, priority) in enumerate(\n zip(layout.channel_names, handler.channel_priority)\n ):\n assert priority_order.index(name) == priority", "def prioritize_goal(first_priority_goal, second_priority_goal):\n\n stronger_assumptions_list = []\n\n for contract in first_priority_goal.get_contracts():\n stronger_assumptions_list.append(And(contract.get_list_assumptions()))\n\n print(second_priority_goal)\n\n for contract in second_priority_goal.get_contracts():\n contract.add_assumptions(Not(Or(stronger_assumptions_list)))\n\n print(second_priority_goal)", "def test_global_update_propogation_switchover(self):\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"DROP DATABASE IF EXISTS global_db\")\n global_master.exec_stmt(\"CREATE DATABASE global_db\")\n global_master.exec_stmt(\"CREATE TABLE global_db.global_table\"\n \"(userID INT, name VARCHAR(30))\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(101, 'TEST 1')\")\n global_master.exec_stmt(\"INSERT INTO 
global_db.global_table \"\n \"VALUES(202, 'TEST 2')\")\n\n status = self.proxy.group.promote(\"GROUPID1\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n status = self.proxy.sharding.lookup_servers(\"1\", 500, \"GLOBAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n global_master = fetch_test_server(row['server_uuid'])\n global_master.connect()\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(303, 'TEST 3')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(404, 'TEST 4')\")\n\n status = self.proxy.group.promote(\"GROUPID2\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 5')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 6')\")\n\n status = self.proxy.group.promote(\"GROUPID3\")\n self.check_xmlrpc_command_result(status)\n\n sleep(5)\n\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(505, 'TEST 7')\")\n global_master.exec_stmt(\"INSERT INTO global_db.global_table \"\n \"VALUES(606, 'TEST 8')\")\n\n sleep(5)\n\n status = self.proxy.sharding.lookup_servers(\"db1.t1\", 500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", {\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')\n\n status = self.proxy.sharding.lookup_servers(\"db1.t1\", 1500, \"LOCAL\")\n for row in self.check_xmlrpc_iter(status):\n if row['status'] == MySQLServer.PRIMARY:\n shard_server = fetch_test_server(row['server_uuid'])\n shard_server.connect()\n rows = shard_server.exec_stmt(\n \"SELECT NAME FROM global_db.global_table\", {\"fetch\" : True}\n )\n self.assertEqual(len(rows), 8)\n self.assertEqual(rows[0][0], 'TEST 1')\n self.assertEqual(rows[1][0], 'TEST 2')\n self.assertEqual(rows[2][0], 'TEST 3')\n self.assertEqual(rows[3][0], 'TEST 4')\n self.assertEqual(rows[4][0], 'TEST 5')\n self.assertEqual(rows[5][0], 'TEST 6')\n self.assertEqual(rows[6][0], 'TEST 7')\n self.assertEqual(rows[7][0], 'TEST 8')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the server id is recorded correctly.
def test_records_correct_server_id(self): def custom_server_priority(srv, ind): """ A custom server priority function that priortises server 1 for customer class 0 and server 2 for customer class 1. """ if ind.customer_class == 0: priorities = {1: 0, 2: 1} return priorities[srv.id_number] if ind.customer_class == 1: priorities = {1: 1, 2: 0} return priorities[srv.id_number] N = ciw.create_network( arrival_distributions={ 'Class 0': [ciw.dists.Exponential(rate=1.0)], 'Class 1': [ciw.dists.Exponential(rate=1.0)] }, service_distributions={ 'Class 0': [ciw.dists.Exponential(rate=200.0)], 'Class 1': [ciw.dists.Exponential(rate=200.0)] }, number_of_servers=[2], server_priority_functions=[custom_server_priority], ) ciw.seed(0) Q = ciw.Simulation(N) Q.simulate_until_max_time(50) all_class_0_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0]) all_class_1_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0]) self.assertTrue(all_class_0_correct) self.assertTrue(all_class_1_correct)
[ "def get_server_id(self):", "def test_server_region_and_id(appliance_ip):\n region = store.current_appliance.server_region()\n if region == 0:\n pytest.skip(\"Can't check this if the region is 0\")\n assert str(store.current_appliance.server_id()).startswith(str(region))", "def testID(self):\n rpcid = random.randint(1, 10)\n res = self.jsonrpc_req(rpcid, 'hello', [])\n self.assertEqual(res['id'], rpcid)", "def test_valid_sid(self):\n valid_sid = self.session.sid\n self.assertTrue(stage_one(self.ccd, valid_sid))", "def test_remotehosts_id_put(self):\n pass", "def get_server_id():\n return getattr(seaserv, 'SERVER_ID', '-')", "def test_id():\n assert Packet12.id == 12", "def test_vicars_id_get(self):\n pass", "def test_server(self):\n self.assertIsNotNone(index.SERVER)", "def test_get_song_id(self):\n # add song\n sid_run1 = self.mud.get_song_id(self.test_song)\n # add song again, songid shoudl be same as bevor\n sid = self.mud.get_song_id(self.test_song)\n self.assertEqual(sid, sid_run1)\n self.assertTrue(isinstance(sid, int))", "def test_get_id(self):\n player1 = player.Player(1, \"Kalle\")\n exp = player1.player_id\n res = player1.get_id()\n self.assertEqual(exp, res)", "def test_set_id(self):\n test_id = 5\n self.test_manager.set_id(test_id)\n self.assertEqual(self.test_manager.get_id(), test_id)\n self.test_manager.set_id(self.NO_ID)\n self.assertEqual(self.test_manager.get_id(), self.NO_ID)", "def test_musicals_id_get(self):\n pass", "def test_id():\n assert Packet1.id == 1", "def test_get_server_details_non_existent_id(self):\n self.assertRaises(exceptions.NotFound, self.client.get_server,\n 'junk-123ab-45cd')", "def test_murderers_id_get(self):\n pass", "def test_team_id(self):\r\n self.nba_teamgame.team_id()\r\n self.assertEqual((self.nba_teamgame.num),1610612741,'incorrect team number')", "def test_hash_id(self):\n self.assertEqual(hash_id(self.id1, self.id2, self.salt, self.length), \"2Y7W5d\")", "def test_integration_getID(self):\n bridge = bridges.Bridge(self.nickname, self.ip, self.orport,\n self.fingerprint)\n self.assertEqual(self.id_digest, bridge.getID())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A custom server priority function that prioritises server 1 for customer class 0 and server 2 for customer class 1.
def custom_server_priority(srv, ind): if ind.customer_class == 0: priorities = {1: 0, 2: 1} return priorities[srv.id_number] if ind.customer_class == 1: priorities = {1: 1, 2: 0} return priorities[srv.id_number]
[ "def test_server_priority_function_two_nodes(self):\n def prioritise_less_busy(srv, ind):\n return srv.busy_time\n\n def prioritise_highest_id(srv, ind):\n return -srv.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1), ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2), ciw.dists.Exponential(2)],\n number_of_servers=[2, 2],\n routing=[[0, 0], [0, 0]],\n server_priority_functions=[prioritise_less_busy, prioritise_highest_id]\n )\n )\n Q.simulate_until_max_time(1000)\n expected_times_node_1 = [256.2457715650031, 257.59339967047254]\n expected_times_node_2 = [157.35577182806387, 356.41473247082365]\n\n for i, (srv_1, srv_2) in enumerate(zip(Q.nodes[1].servers, Q.nodes[2].servers)):\n self.assertEqual(srv_1.busy_time, expected_times_node_1[i])\n self.assertEqual(srv_2.busy_time, expected_times_node_2[i])", "def test_server_priority_function_allocate_to_last_server_first(self):\n def get_server_busy_time(server, ind):\n return -server.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [158.68745586286119, 331.0719836410557]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_server_priority_function_allocate_to_less_busy(self):\n def get_server_busy_time(server, ind):\n return server.busy_time\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [245.07547532640024, 244.68396417751663]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_records_correct_server_id(self):\n def custom_server_priority(srv, ind):\n \"\"\"\n A custom server priority function that priortises server 1 for \n customer class 0 and server 2 for customer class 1.\n \"\"\"\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]\n\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=1.0)], 'Class 1': [ciw.dists.Exponential(rate=1.0)]\n },\n service_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=200.0)], 'Class 1': [ciw.dists.Exponential(rate=200.0)]\n },\n number_of_servers=[2],\n server_priority_functions=[custom_server_priority],\n )\n ciw.seed(0)\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(50)\n\n all_class_0_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n all_class_1_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n\n self.assertTrue(all_class_0_correct)\n self.assertTrue(all_class_1_correct)", "def find_best_server(self):\n pass", "def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': 
[ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 1)\n self.assertEqual(float(recs[3].waiting_time), 1.5)\n self.assertEqual(float(recs[4].waiting_time), 2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 7)\n self.assertEqual(float(recs[3].service_start_date), 9.5)\n self.assertEqual(float(recs[4].service_start_date), 12)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 9.5)\n self.assertEqual(float(recs[3].service_end_date), 12)\n self.assertEqual(float(recs[4].service_end_date), 14.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 5.7)\n self.assertEqual(float(recs[3].waiting_time), 1.2)\n self.assertEqual(float(recs[4].waiting_time), 4.2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 11.7)\n self.assertEqual(float(recs[3].service_start_date), 9.2)\n self.assertEqual(float(recs[4].service_start_date), 14.2)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 14.2)\n self.assertEqual(float(recs[3].service_end_date), 11.7)\n self.assertEqual(float(recs[4].service_end_date), 16.7)\n\n # Test interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(float(interrupted_recs[0].arrival_date), 6)\n 
self.assertEqual(float(interrupted_recs[0].service_start_date), 7)\n self.assertEqual(float(interrupted_recs[0].waiting_time), 1)\n self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)\n self.assertEqual(float(interrupted_recs[0].service_time), 2.5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))", "def _set_server_status_primary(server, update_only):\n raise _errors.ServerError(\n \"If you want to make a server (%s) primary, please, use the \"\n \"group.promote function.\" % (server.uuid, )\n )", "def testServerWeightInt(self):\n proto = 'http'\n self.driver.add_protocol(proto, None)\n self.driver.add_server(proto, 100, '1.2.3.4', 7777, 10)\n servers = self.driver._config[proto]['servers']\n self.assertEqual(len(servers), 1)\n self.assertEqual(servers[0], (100, '1.2.3.4', 7777, 10, False))", "def set_deployment_priorities(cls, nodes):\n prior = Priority()\n\n primary_swift_proxy_piror = prior.next\n for n in cls.by_role(nodes, 'primary-swift-proxy'):\n n['priority'] = primary_swift_proxy_piror\n\n swift_proxy_prior = prior.next\n for n in cls.by_role(nodes, 'swift-proxy'):\n n['priority'] = swift_proxy_prior\n\n storage_prior = prior.next\n for n in cls.not_roles(nodes, 'storage'):\n n['priority'] = storage_prior\n\n # Controllers deployed one by one\n for n in cls.by_role(nodes, 'primary-controller'):\n n['priority'] = prior.next\n\n for n in cls.by_role(nodes, 'controller'):\n n['priority'] = prior.next\n\n other_nodes_prior = prior.next\n for n in cls.not_roles(nodes, ['primary-swift-proxy',\n 'swift-proxy',\n 'storage',\n 'primary-controller',\n 'controller',\n 'quantum']):\n n['priority'] = other_nodes_prior", "def request_register_default_worker_servers(self, req):\n for idx in range(8):\n self._server_pool.add(\"apscn{:02d}.mpifr-be.mkat.karoo.kat.ac.za\".format(idx), 6000)\n return (\"ok\",)", "def testThresholdPriority(self):\n\n myResourceControl = ResourceControl()\n myResourceControl.insertSite(\"testSite1\", 20, 40, \"testSE1\", \"testCE1\")\n myResourceControl.insertThreshold(\"testSite1\", \"Processing\", 10, 8)\n myResourceControl.insertThreshold(\"testSite1\", \"Merge\", 5, 3)\n\n # test default task priorities\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 4)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 0)\n\n myResourceControl.changeTaskPriority(\"Merge\", 3)\n myResourceControl.changeTaskPriority(\"Processing\", 1)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 3)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 1)\n\n myResourceControl.changeTaskPriority(\"Merge\", 1)\n myResourceControl.changeTaskPriority(\"Processing\", 3)\n\n result = myResourceControl.listThresholdsForSubmit()\n self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 1)\n self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 3)\n\n return", "def thread_cmp(x, y):\n return cmp(y.get_priority(), x.get_priority())", "def number_packets_server_client(self):\n # Local variables to avoid having very long lines\n cli_serv = self._number_packets_client_server\n serv_cli = self._number_packets_server_client\n total = self.total_number_packets\n\n # If the value that is asked is not initialized\n if serv_cli is None:\n\n # If the other one isn't initialized too, compute both of them\n if cli_serv is None:\n serv_cli 
= 0\n for p in self._packets:\n if p[IP].src == self._server:\n serv_cli += 1\n self._number_packets_server_client = serv_cli\n self._number_packets_client_server = total - serv_cli\n\n # If the other one is initialized get the value of this one from it\n else:\n self._number_packets_server_client = total - cli_serv\n\n # Return the asked value\n return self._number_packets_server_client", "def save(self, *args, **kwargs):\n if not self.pk: # inserting new feature request\n if self.status == 'A':\n max_priority = Features.objects.filter(client=self.client, status='A').aggregate(\n Max('priority'))['priority__max']\n if self.priority ==0:\n pass\n elif max_priority is not None and self.priority > max_priority:\n self.priority = max_priority + 1\n else:\n Features.objects.filter(client=self.client, priority__gte=self.priority,\n ).exclude(priority=0).update(priority=F('priority') + 1)\n else:\n self.priority = 0\n else: # updating feature request\n old_feature_object = Features.objects.get(pk=self.pk)\n old_priority = old_feature_object.priority\n old_status = old_feature_object.status\n self.client = old_feature_object.client # client can not be modified\n new_priority = self.priority\n new_status = self.status\n if new_priority == old_priority and new_status == old_status:\n pass # no reordering required\n else:\n if new_status == 'A':\n if old_priority == 0:\n Features.objects.filter(client=self.client, priority__gte=new_priority,\n ).exclude(priority=0).update(priority=F('priority') + 1)\n elif new_priority == 0:\n Features.objects.filter(client=self.client, priority__gte=old_priority,\n ).exclude(priority=0).update(priority=F('priority') - 1)\n elif new_priority > old_priority:\n Features.objects.filter(client=self.client, priority__gt=old_priority, priority__lte=new_priority,\n ).exclude(priority=0).update(priority=F('priority') - 1)\n else:\n Features.objects.filter(client=self.client, priority__gte=new_priority, priority__lt=old_priority,\n ).exclude(priority=0).update(priority=F('priority') + 1)\n\n max_priority = Features.objects.filter(client=self.client, status='A').aggregate(\n Max('priority'))['priority__max']\n\n if max_priority is not None and new_priority > max_priority:\n self.priority = max_priority + 1 # priority must be sequential\n else:\n self.priority = 0 # only features that have status=A can be priorities\n super().save(*args, **kwargs) # calling super to do the default action.", "def _order_ec2_deployers_by_priority(ec2_deployers):\n io_unhealthy = []\n io_healthy = []\n o_unhealthy = []\n o_healthy = []\n\n for ec2_deployer in ec2_deployers:\n deployer = ec2_deployer\n node = deployer.get_node()\n if node.is_operational:\n if node.is_healthy:\n o_healthy.append(ec2_deployer)\n else:\n o_unhealthy.append(ec2_deployer)\n else:\n if node.is_healthy:\n io_healthy.append(ec2_deployer)\n else:\n io_unhealthy.append(ec2_deployer)\n\n return io_healthy + io_unhealthy + o_unhealthy + o_healthy", "def BandwidthPriority2(self):\r\n\t\treturn self._get_attribute('bandwidthPriority2')", "def number_packets_client_server(self):\n # Local variables to avoid having very long lines\n cli_serv = self._number_packets_client_server\n serv_cli = self._number_packets_server_client\n total = self.total_number_packets\n\n # If the value that is asked is not initialized\n if cli_serv is None:\n\n # If the other one isn't initialized too, compute both of them\n if serv_cli is None:\n cli_serv = 0\n for p in self._packets:\n if p[IP].src == self._client:\n cli_serv += 1\n 
self._number_packets_client_server = cli_serv\n self._number_packets_server_client = total - cli_serv\n\n # If the other one is initialized get the value of this one from it\n else:\n self._number_packets_client_server = total - serv_cli\n\n # Return the asked value\n return self._number_packets_client_server", "def _getNodePriority(G, node):\n priority = 1.0\n if ConstructionOrdering.kAttrNameGraphMaxDependentNumber in G.graph and G.graph[ConstructionOrdering.kAttrNameGraphMaxDependentNumber] != 0:\n if ConstructionOrdering.kAttrNameNodeDependentSetSize in G.node[node]:\n priority *= ((G.node[node][ConstructionOrdering.kAttrNameNodeDependentSetSize] + 1) / (G.graph[ConstructionOrdering.kAttrNameGraphMaxDependentNumber] + 1))\n if ConstructionOrdering.kAttrNameGraphMaxLevel in G.graph and G.graph[ConstructionOrdering.kAttrNameGraphMaxLevel] != 0:\n if ConstructionOrdering.kAttrNameNodeLevel in G.node[node]:\n numerator = G.graph[ConstructionOrdering.kAttrNameGraphMaxLevel] - G.node[node][ConstructionOrdering.kAttrNameNodeLevel]\n if numerator == 0:\n numerator = 0.5\n priority *= (numerator / G.graph[ConstructionOrdering.kAttrNameGraphMaxLevel])\n return priority", "def OrganizeProviderServers(servers):\n\n serversByIP = {}\n\n for server in servers:\n # Grab the first three characters of our IP. This will help us\n # sort our servers into blocks, and we'll iterate over each block\n # when we query servers.\n serverIP = server[\"ip\"]\n serverUniqueID = serverIP[0:3]\n if (serverUniqueID not in serversByIP):\n # Create a new list to put our servers in.\n serversByIP[serverUniqueID] = []\n\n # Add our server by IP to this list.\n serversByIP[serverUniqueID].append(server)\n\n # Grab our total amount of servers.\n total = 0\n for ID in serversByIP:\n total += len(serversByIP[ID])\n\n serversSorted = 0\n servers = []\n\n # Grab a server, one by one, from each region and remove it.\n # Lets say we have 6 unique IPs with the amount of servers being\n # 6, 6, 3, 3, 3, 4. Goto the first one, take the first server, and remove\n # it from the list and put it into our final sorted list. Goto the next\n # server list, pop the first one, add it to the final sorted list, repeat.\n\n # While the amount of the servers we've put into the list doesn't\n # match our full total.\n while serversSorted != total:\n for group in serversByIP:\n # If we don't have anymore servers in this group,\n # don't worry about it in the future by deleting it\n # from the dict.\n if len(serversByIP[group]) == 0:\n del serversByIP[group]\n break # Break the for loop here to prevent a RuntimeError.\n\n # Grab the very first server and \"pop\" it, removing it\n # from the list while grabbing it at the same time.\n server = serversByIP[group].pop(0)\n servers.append(server)\n\n serversSorted += 1\n\n # All the servers have been sorted, set this list for our provider.\n return servers" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that, when reneging, the correct next event time is detected.
def test_reneging_next_event(self):
    N = ciw.create_network(
        arrival_distributions=[ciw.dists.Deterministic(7)],
        service_distributions=[ciw.dists.Deterministic(11)],
        number_of_servers=[1],
        reneging_time_distributions=[ciw.dists.Deterministic(3)]
    )
    Q = ciw.Simulation(N)
    self.assertTrue(Q.nodes[1].reneging)
    #### We would expect:
    # t=7 arrival cust 1
    # t=14 arrival cust 2
    # t=17 renege cust 2
    # t=18 leave cust 1
    # t=21 arrival cust 3
    # t=28 arrival cust 4
    # t=31 renege cust 4
    # t=32 leave cust 3
    Q.simulate_until_max_time(6)
    self.assertEqual(Q.nodes[0].next_event_date, 7)
    self.assertEqual(Q.nodes[1].next_event_date, float('inf'))
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
    Q.simulate_until_max_time(13)
    self.assertEqual(Q.nodes[0].next_event_date, 14)
    self.assertEqual(Q.nodes[1].next_event_date, 18)
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
    Q.simulate_until_max_time(16)
    self.assertEqual(Q.nodes[0].next_event_date, 21)
    self.assertEqual(Q.nodes[1].next_event_date, 17)
    self.assertEqual(Q.nodes[1].next_renege_date, 17)
    Q.simulate_until_max_time(17.5)
    self.assertEqual(Q.nodes[0].next_event_date, 21)
    self.assertEqual(Q.nodes[1].next_event_date, 18)
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
    Q.simulate_until_max_time(20)
    self.assertEqual(Q.nodes[0].next_event_date, 21)
    self.assertEqual(Q.nodes[1].next_event_date, float('inf'))
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
    Q.simulate_until_max_time(27)
    self.assertEqual(Q.nodes[0].next_event_date, 28)
    self.assertEqual(Q.nodes[1].next_event_date, 32)
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
    Q.simulate_until_max_time(30)
    self.assertEqual(Q.nodes[0].next_event_date, 35)
    self.assertEqual(Q.nodes[1].next_event_date, 31)
    self.assertEqual(Q.nodes[1].next_renege_date, 31)
    Q.simulate_until_max_time(31.5)
    self.assertEqual(Q.nodes[0].next_event_date, 35)
    self.assertEqual(Q.nodes[1].next_event_date, 32)
    self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))
[ "def test_update_time_tracking_entry(self):\n pass", "def test_next_occurrence():\n schedule_start = timezone.now()\n schedule_every = timedelta(hours=1)\n schedule = Schedule(start=schedule_start, every=schedule_every)\n expected = schedule_start + schedule_every\n assert schedule.next_occurrence() == expected", "def test_get_time_tracking_entry(self):\n pass", "def test_nextdate_c2(self):", "def test_create_time_tracking_entry(self):\n pass", "def test_timeframes(self):\n pass", "def test_time(self):\n self.logTestName()\n self.assertEqual(self.t, self.r)", "def test_user_tracked_times(self):\n pass", "def test_post_muveto_current_change1ts(self):\n pass", "def _next_event_time(self):\n return self._scheduler.next()[1]", "def check_events(self):\n for event in self.events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n if len(start) < 20: # ignore events which last all day(do not have a time)\n self.update_event(event)\n continue\n print(\"\\n\\n\\n\\n\", start, event['summary'])\n if 'description' in event:\n if event['description'] == '#Created by MapThat#':\n self.prev_event_travel = 1\n self.prev_travel_event_id = event['id']\n continue\n if ('#This event has been checked by MapThat#' in event['description'] \n and self.prev_event_traversed == 1):\n #this is to make sure that there are no changes in the previous event which can affect the travel time to the current event\n self.prev_time = datetime.datetime.strptime(\n (event['end'].get('dateTime', event['end'].get('date'))),\n \"%Y-%m-%dT%H:%M:%S%z\")\n if 'location' in event:\n self.prev_location = event['location']\n self.prev_event_traversed = 1\n self.prev_event_travel = 0\n continue\n if self.prev_event_travel == 1 and self.prev_travel_event_id not in [None]:\n self.service.events().delete(calendarId='primary',\n eventId=self.prev_travel_event_id).execute()\n start = datetime.datetime.strptime(start, \"%Y-%m-%dT%H:%M:%S%z\")\n self.prev_event_traversed = 0\n self.update_event(event)\n time_diff = ((start-self.prev_time).total_seconds())\n #checking if the event has a location. 
if it doesnt have a loction, it is flagged and we check the next event\n if 'location' in event:\n print(\"location: \", event['location'])\n if self.mode is None:\n self.mode = input(\n '''Enter exact string out of following:[DRIVING, WALKING, BICYCLING, TRANSIT]\\n''')\n if time_diff >= 3600:\n src = self.default_location\n else:\n src = self.default_location\n if self.prev_location not in [None]:\n src = self.get_lat_log(self.prev_location)\n travel_time = self.get_distance(event['location'], src)\n self.event_create(start, travel_time)\n self.prev_location = event['location']\n else:\n print(\"no Location\")\n self.prev_location = None\n self.prev_time = datetime.datetime.strptime(\n (event['end'].get('dateTime', event['end'].get('date'))), \"%Y-%m-%dT%H:%M:%S%z\")\n self.prev_event_travel = 0\n self.prev_event_id = None\n self.prev_travel_event_id = None\n #resetting all flags", "def test_time_series(self):\n\n assert False", "def testCheckLastEvent(self):\n event_tester = EventTester()\n event = Event()\n event_tester.notify(event)\n self.assertEqual(event_tester.last_event(), event)", "def check_time(iteration, start, end):\r\n return start <= iteration % 24 < end", "def test_user_current_tracked_times(self):\n pass", "def test_timecreate(self):\n\n new_jawn = self._class()\n time = datetime.datetime.now()\n self.assertIsInstance(new_jawn.create_at, datetime.datetime)\n self.assertTrue(0 < (time - new_jawn.create_at).total_seconds() < 1)", "def test_generate_event(self):\n pass", "def test_eval_schedule_time(schedule):\n schedule.opts.update({\"pillar\": {\"schedule\": {}}})\n schedule.opts.update(\n {\"schedule\": {\"testjob\": {\"function\": \"test.true\", \"seconds\": 60}}}\n )\n now = datetime.datetime.now()\n schedule.eval()\n assert schedule.opts[\"schedule\"][\"testjob\"][\"_next_fire_time\"] > now", "def is_event(self):\n current_time = self.current_time()\n current_event_time = self.events[self.current_event]\n cet = current_event_time\n current_event_time = time(cet.hour, cet.minute, cet.second)\n self.logger.debug(\"current_event_time: {0}\".format(current_event_time))\n fudge_factor = (datetime.combine(date(1,1,1),\n current_event_time) + timedelta(seconds=60)).time()\n self.logger.debug(\"fudge_factor: {0}\".format(fudge_factor))\n status = current_event_time <= current_time <= fudge_factor\n return status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only one type of customer arrives (Class 0), but if they wait more than 4 time units they change to Class 1. Services last exactly 4.5 time units. Simulate until 26 time units.
def test_class_change_while_waiting(self):
    N = ciw.create_network(
        arrival_distributions={'Class 0': [ciw.dists.Deterministic(3)],
                               'Class 1': [ciw.dists.NoArrivals()]},
        service_distributions={'Class 0': [ciw.dists.Deterministic(4.5)],
                               'Class 1': [ciw.dists.Deterministic(4.5)]},
        number_of_servers=[1],
        class_change_time_distributions=[
            [None, ciw.dists.Deterministic(4)],
            [None, None]]
    )
    Q = ciw.Simulation(N)
    Q.simulate_until_max_time(26)
    recs = Q.get_all_records()
    self.assertEqual(len(recs), 5)
    # Customer 1
    self.assertEqual(recs[0].arrival_date, 3)
    self.assertEqual(recs[0].waiting_time, 0)
    self.assertEqual(recs[0].service_start_date, 3)
    self.assertEqual(recs[0].service_end_date, 7.5)
    self.assertEqual(recs[0].customer_class, 0)
    self.assertEqual(recs[0].original_customer_class, 0)
    # Customer 2
    self.assertEqual(recs[1].arrival_date, 6)
    self.assertEqual(recs[1].waiting_time, 1.5)
    self.assertEqual(recs[1].service_start_date, 7.5)
    self.assertEqual(recs[1].service_end_date, 12)
    self.assertEqual(recs[1].customer_class, 0)
    self.assertEqual(recs[1].original_customer_class, 0)
    # Customer 3
    self.assertEqual(recs[2].arrival_date, 9)
    self.assertEqual(recs[2].waiting_time, 3)
    self.assertEqual(recs[2].service_start_date, 12)
    self.assertEqual(recs[2].service_end_date, 16.5)
    self.assertEqual(recs[2].customer_class, 0)
    self.assertEqual(recs[2].original_customer_class, 0)
    # Customer 4
    self.assertEqual(recs[3].arrival_date, 12)
    self.assertEqual(recs[3].waiting_time, 4.5)
    self.assertEqual(recs[3].service_start_date, 16.5)
    self.assertEqual(recs[3].service_end_date, 21)
    self.assertEqual(recs[3].customer_class, 1)
    self.assertEqual(recs[3].original_customer_class, 0)
    # Customer 5
    self.assertEqual(recs[4].arrival_date, 15)
    self.assertEqual(recs[4].waiting_time, 6)
    self.assertEqual(recs[4].service_start_date, 21)
    self.assertEqual(recs[4].service_end_date, 25.5)
    self.assertEqual(recs[4].customer_class, 1)
    self.assertEqual(recs[4].original_customer_class, 0)
[ "def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 1)\n self.assertEqual(float(recs[3].waiting_time), 1.5)\n self.assertEqual(float(recs[4].waiting_time), 2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 7)\n self.assertEqual(float(recs[3].service_start_date), 9.5)\n self.assertEqual(float(recs[4].service_start_date), 12)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 9.5)\n self.assertEqual(float(recs[3].service_end_date), 12)\n self.assertEqual(float(recs[4].service_end_date), 14.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 5.7)\n self.assertEqual(float(recs[3].waiting_time), 1.2)\n self.assertEqual(float(recs[4].waiting_time), 4.2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 11.7)\n self.assertEqual(float(recs[3].service_start_date), 9.2)\n self.assertEqual(float(recs[4].service_start_date), 14.2)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 14.2)\n self.assertEqual(float(recs[3].service_end_date), 11.7)\n self.assertEqual(float(recs[4].service_end_date), 
16.7)\n\n # Test interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(float(interrupted_recs[0].arrival_date), 6)\n self.assertEqual(float(interrupted_recs[0].service_start_date), 7)\n self.assertEqual(float(interrupted_recs[0].waiting_time), 1)\n self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)\n self.assertEqual(float(interrupted_recs[0].service_time), 2.5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))", "def test_reneging_next_event(self):\n N = ciw.create_network(\n arrival_distributions=[ciw.dists.Deterministic(7)],\n service_distributions=[ciw.dists.Deterministic(11)],\n number_of_servers=[1],\n reneging_time_distributions=[ciw.dists.Deterministic(3)]\n )\n Q = ciw.Simulation(N)\n self.assertTrue(Q.nodes[1].reneging)\n #### We would expect:\n # t=7 arrival cust 1\n # t=14 arrival cust 2\n # t=17 renege cust 2\n # t=18 leave cust 1\n # t=21 arrival cust 3\n # t=28 arrival cust 4\n # t=31 renege cust 4\n # t=32 leave cust 3\n Q.simulate_until_max_time(6)\n self.assertEqual(Q.nodes[0].next_event_date, 7)\n self.assertEqual(Q.nodes[1].next_event_date, float('inf'))\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))\n Q.simulate_until_max_time(13)\n self.assertEqual(Q.nodes[0].next_event_date, 14)\n self.assertEqual(Q.nodes[1].next_event_date, 18)\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))\n Q.simulate_until_max_time(16)\n self.assertEqual(Q.nodes[0].next_event_date, 21)\n self.assertEqual(Q.nodes[1].next_event_date, 17)\n self.assertEqual(Q.nodes[1].next_renege_date, 17)\n Q.simulate_until_max_time(17.5)\n self.assertEqual(Q.nodes[0].next_event_date, 21)\n self.assertEqual(Q.nodes[1].next_event_date, 18)\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))\n Q.simulate_until_max_time(20)\n self.assertEqual(Q.nodes[0].next_event_date, 21)\n self.assertEqual(Q.nodes[1].next_event_date, float('inf'))\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))\n Q.simulate_until_max_time(27)\n self.assertEqual(Q.nodes[0].next_event_date, 28)\n self.assertEqual(Q.nodes[1].next_event_date, 32)\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))\n Q.simulate_until_max_time(30)\n self.assertEqual(Q.nodes[0].next_event_date, 35)\n self.assertEqual(Q.nodes[1].next_event_date, 31)\n self.assertEqual(Q.nodes[1].next_renege_date, 31)\n Q.simulate_until_max_time(31.5)\n self.assertEqual(Q.nodes[0].next_event_date, 35)\n self.assertEqual(Q.nodes[1].next_event_date, 32)\n self.assertEqual(Q.nodes[1].next_renege_date, float('inf'))", "def generate_customers(self):\n arrive_time = randint(50, 100)\n while self.status:\n self.clock.run(until=arrive_time)\n c = Customer(id=self.customer_count, time=arrive_time)\n self.history.append(c)\n self.queue_lock.acquire()\n self.customer_queue.append(c)\n self.queue_lock.release()\n self.customer_count = self.customer_count + 1\n arrive_time = arrive_time + randint(50, 100)", "def wait_until_ally_time ( req_type ):\n\t# Get our stuff in utc\n\tnow = datetime.now( tz=timezone.utc )\n\n\t# Get the time we have stored\n\ta_time = _rl_exp_datetime[req_type.value]\n\n\t# Make sure we have a valid time\n\tif a_time is None:\n\t\ta_time = now + timedelta(seconds=60.5)\n\n\t# Block thread\n\ttime.sleep( (a_time - now).total_seconds() )", "def customer_arrival(team, queue, data_names, data_issues, level_min, level_max):\n team_size = len(team)\n for _ in range(0, 
team_size):\n customer = operations.random_customer(data_names, data_issues, level_min, level_max)\n queue.append(customer)\n if team_size == 1:\n arrival_descriptor = \"A customer has\"\n else:\n arrival_descriptor = str(team_size) + \" customers have\"\n print(arrival_descriptor + \" arrived at the helpdesk.\")", "def waiting(customer):\n clock = RealtimeEnvironment(initial_time=0, factor=speed, strict=False)\n customer.action() # Waiting for queue to open\n # Try to get into a queue every seconds\n for i in range(1, 20):\n clock.run(until=i)\n if not (window0.full and window1.full):\n customer.queue_action()\n customer.add_time(i)\n if window0.qsize() < window1.qsize():\n window0.put(customer)\n print(queue_message.format(customer.id, self.window0.id))\n else:\n window1.put(customer)\n print(queue_message.format(customer.id, self.window1.id))\n if not customer.in_window():\n # leave and put them back into the queue\n print(\"Customer {} has left and will try again later\".format(customer.id))\n customer.action() # Leaving the line\n customer.add_time(600)\n clock.run(until=620)\n people_lock.acquire()\n people.append(customer)\n\n customer.action() # Rejoining the line\n people_lock.release()\n print(\"Customer {} had rejoined the waiting list\".format(customer.id))\n\n # Leave permanently\n clock.run(until=660)\n people_lock.acquire()\n if not customer.in_window():\n customer.action() # Leaving Permanently\n customer.add_time(40)\n people.remove(customer)\n print(\"Customer {} has left permanently\".format(customer.id))\n people_lock.release()", "def check_timer(self, step, limit = 1):\n timer_now = datetime.datetime.now(tz = self.USeasternTimeZone)\n change = (timer_now-self.timer_start).total_seconds()\n if change > limit: # if time limit exceeded\n if step == self.accountManagerState.WAIT_FOR_INIT_CALLBACK:\n if self.nextOrderId_Status !='Done':\n self.log.error(__name__ + \": \" + 'Time Limit Exceeded when \\\n requesting nextValidId' + str(step,datetime.datetime.now()) + \\\n '\\n' + 'self.nextValidId_status = ' + str(self.nextValidId_status))\n exit()\n elif self.req_real_time_price_check_end() != 'Done':\n self.log.error(__name__ + \": \" + 'ERROR in receiving real time quotes') \n for security in self.data:\n security.print_obj()\n #for ct in [self.data[security].bid_price,self.data[security].ask_price]: \n # if ct < 0.0001:\n # self.log.error(__name__ + \": \" + security.print_obj())\n exit() \n elif self.accountDownloadEndstatus !='Done':\n self.log.error(__name__ + \": \" + 'ERROR in accountDonwload') \n else: \n self.log.error(__name__ + \": ERROR in retrieve hist data\")\n for reqid in self.returned_hist:\n print self.returned_hist[reqid].status, reqid\n if self.returned_hist[reqid].status!='Done':\n self.log.error(__name__ + \": \" + self.returned_hist[reqid].security.print_obj()\\\n +'.'+self.returned_hist[reqid].period)\n\n #print self.returned_hist[reqid].security.symbol+'.' \\\n # +self.returned_hist[reqid].security.currency+'.' 
\\\n # +self.returned_hist[reqid].security.secType+'.'\\\n # +self.returned_hist[reqid].period\n elif step == self.accountManagerState.WAIT_FOR_DAILY_PRICE_CALLBACK:\n self.log.error(__name__ + \": \" + 'Time Limit Exceeded when \\\n requesting historical daily data' + step, datetime.datetime.now() + \\\n '\\n' + 'The content of self.hist_daily: ')\n for security in self.data:\n self.log.info(__name__ + \": \" + str(self.data[security].hist_daily.head()))\n# if self.re_send < 3: \n# self.log.error(__name__ + \": \" + 'Re-send req_daily_price_first')\n# self.re_send += 1\n# self.req_hist_price(endtime=datetime.datetime.now())\n# self.set_timer()\n# else:\n# self.log.error(__name__ + \": \" + 'Re-send request three times, EXIT')\n# exit()\n elif step == self.accountManagerState.WAIT_FOR_BAR_PRICE_CALLBACK:\n self.log.error(__name__ + \": \" + 'Time Limit Exceeded when \\\n requesting historical bar data' + \\\n str(step) + str(datetime.datetime.now()))\n for security in self.data:\n self.log.info(__name__ + \": \" + str(self.data[security].hist_bar.head()))\n# if self.re_send < 3: \n# self.accountManagerState.set_state(\n# self.accountManagerState.REQ_BAR_PRICE)\n# self.log.error(__name__ + \": \" + 'Re-send req_bar_price_first')\n# self.re_send += 1\n# self.set_timer()\n# else:\n# self.log.error(__name__ + \": \" + 'Re send request three times, EXIT')\n# exit()\n elif step == self.accountManagerState.WAIT_FOR_UPDATE_PORTFOLIO_CALLBACK:\n self.log.error(__name__ + \": \" + 'update account failed')", "def gas_meter(self, data):\n\n dtime = data.get('Time')\n\n self.newTime = parser.parse(dtime)\n self.meterID = data.get('Message').get('ID')\n self.currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n\n self.newConsumption = data.get('Message').get('Consumption')\n \n self.meter_type = \"Gas\"\n\n if not self.meterID in config.meters.keys():\n if config.debug:print(\"first time seeing this id: {}\".format(self.meterID))\n config.meters[self.meterID] = {\"Time\": self.newTime, \"ID\":self.meterID, \"Consumption\": self.newConsumption}\n return False\n else:\n\n self.oldConsumption = config.meters[self.meterID].get('Consumption')\n self.oldTime = config.meters[self.meterID].get('Time')\n\n # level shift.\n config.meters[self.meterID]['Consumption'] = self.newConsumption\n config.meters[self.meterID]['Time'] = self.newTime\n\n\n self.timeDiff = self.newTime - self.oldTime\n\n ##### DEbUG TAKE OUT.\n #if self.meterID in config.myMeters:print(data)\n\n if(self.timeDiff.total_seconds() < 0):print(\"Error: Time Diff Negative. Customer: %s. %d - %d = %d\" % (self.meterID, self.newTime, self.oldTime, self.timeDiff))\n\n self.mcfDiff = self.newConsumption - self.oldConsumption\n\n #if(self.wattDiff != 0):\n #if(self.mcfDiff):\n \n if data.get('Message').get('Consumption'):\n #print(data)\n self.mcfPerMin = (self.mcfDiff / (self.timeDiff.total_seconds() / 60)) / 1000 # <-\n\n # if numbers are way out of range throw error\n if self.meterID in config.myMeters:\n print(\"[%s] Customer %s Using %f mcf per minute. (consumption: %d) - (time elapsed: %d s) ### %d\" % (self.currentTime, self.meterID, self.mcfPerMin, self.mcfDiff, self.timeDiff.total_seconds(),self.newConsumption))\n else:\n print(\"[%s] Customer %s Using %f mcf per minute. 
(consumption: %d) - (time elapsed: %d s)\" % (self.currentTime, self.meterID, self.mcfPerMin, self.mcfDiff, self.timeDiff.total_seconds()))\n\n self.log_data(data,self.mcfDiff,self.mcfPerMin,\"mcf/min\")\n \n else:\n # consumption data hasn't changed. time shift back and wait some more.\n config.meters[self.meterID]['Time'] = self.oldTime\n config.meters[self.meterID]['Consumption'] = self.oldConsumption #redundant?\n \n self.log_data(data,0,0,\"mcf/min\")\n\n return True", "def wait_time(self):\r\n if self.cashier_arrival != None and self.line_arrival != None:\r\n return self.cashier_arrival - self.line_arrival", "def simulate_controllers(client, startCTE, startHE, startDTP, \n taxi_controller=TaxiController(0.015, 0.008), \n takeoff_controller=TakeoffController(0.07, 0.035, 0.01, 0.01),\n climb_controller=ClimbController(0.001, 0.01),\n simSpeed=1.0):\n # Reset to the desired starting position\n client.sendDREF(\"sim/time/sim_speed\", simSpeed)\n xpc3_helper.reset(client, cteInit = startCTE, heInit = startHE, dtpInit = startDTP)\n xpc3_helper.sendBrake(client, 0)\n\n time.sleep(2) # 5 seconds to get terminal window out of the way\n client.pauseSim(False)\n\n time.sleep(0.001)\n init_elevation = client.getDREF(\"sim/flightmodel/position/elevation\")[0]\n dtp = startDTP\n startTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n endTime = startTime\n \n # Lets start witht the taxi controller\n controller = taxi_controller\n\n print(\"Taxiing!\")\n\n while True:\n \n # Get relevant state variables\n speed = xpc3_helper.getSpeed(client)\n cte, dtp, he = xpc3_helper.getHomeState(client)\n lat, lon, el = coords(client)\n psi, theta, phi = angles(client)\n roll_speed, pitch_speed, yaw_speed = getSpins(client)\n vert_speed = getVertSpeed(client)\n \n # Store them in a state dictionary\n state = {\"speed\" : speed, \"cte\" : cte, \"he\" : he,\n \"lat\" : lat, \"lon\" : lon, \"el\" : el,\n \"psi\" : psi, \"theta\" : theta, \"phi\" : phi,\n \"roll_speed\" : roll_speed, \"pitch_speed\" : pitch_speed, \"yaw_speed\" : yaw_speed,\n \"vert_speed\" : vert_speed}\n \n \n # print(state)\n \n # Set the controller here if you need to\n \n # If we are taxiing and we reach the center of the runway, lets take off!\n if controller == taxi_controller and abs(state[\"he\"]) < 1 and abs(state[\"cte\"]) < 1:\n print(\"Taking off!\")\n controller = takeoff_controller\n \n if controller == takeoff_controller and abs(state[\"speed\"]) > 30:\n print(\"Climbing!\")\n controller = climb_controller\n \n \n\n # Get and send the controls from our controller\n ctrl = controller.control(client, state)\n send_controls(client, ctrl)\n \n # Wait for next timestep\n while endTime - startTime < 1:\n time.sleep(0.01)\n endTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n \n\n # Set things for next round\n time.sleep(0.01)\n startTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n endTime = startTime\n \n time.sleep(0.001)\n\n client.pauseSim(True)", "def gas_station_control(env, fuel_pump):\n while True:\n if fuel_pump.level / fuel_pump.capacity * 100 < THRESHOLD:\n # We need to call the tank truck now!\n txt = ('Calling tank truck at %d' % env.now).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n # Wait for the tank truck to arrive and refuel the station\n yield env.process(tank_truck(env, fuel_pump))\n\n yield env.timeout(10) # Check every 10 seconds", "def ActivationStartle(agentData, paraAgents, step):\n\n agentData.timer_startle[agentData.v_m > agentData.v_t[:, step]] = 
paraAgents.duration_startle\n agentData.timer_refractory[agentData.v_m > agentData.v_t[:, step]] = paraAgents.duration_refractory\n agentData.v_m[agentData.v_m > agentData.v_t[:, step]] = paraAgents.e_l\n\n # membrane potential is clipped to the resting potential for the refractory period\n agentData.v_m[agentData.timer_refractory >= 0.0] = paraAgents.e_l\n\n condition_new_startle = (agentData.timer_startle > 0.0) & (np.sum(agentData.force_startle) == 0) \\\n & agentData.allow_startling\n indices_new_startle = np.where(condition_new_startle)[0]\n\n for i in indices_new_startle:\n # new_phi_startle = agentData.phiDes_startle[i] + 0.1 * np.pi * np.random.random()\n new_phi_startle = agentData.phiDes_startle[i]\n agentData.force_startle[i, 0] = np.cos(new_phi_startle)\n agentData.force_startle[i, 1] = np.sin(new_phi_startle)\n agentData.startle[i] = 1\n if paraAgents.print_startles:\n print('Individual {} startles!!!!'.format(i))\n\n return", "def simulate( self, finishTime ):\n ...", "def getNumCustomersWaiting(self):\r\n return 0", "def serve_queue(self):\n serve_time = 0\n while self.status:\n if not self.queue.empty():\n time = randint(300, 600)\n serve_time = serve_time + time\n self.clock.run(until=serve_time)\n c = self.queue.get()\n print(\"Customer {} has finished being served\".format(c.id))\n c.finished(time)\n self.payment_queue.complete(c)", "def arrival_transition(self, t):\n if self.state == 'COLD' or self.state == 'WARM':\n raise Exception('instance is already busy!')\n\n elif self.state == 'IDLE':\n self.state = 'WARM'\n self.is_busy = True\n self.next_departure = t + self.warm_service_process.generate_trace()\n self.update_next_termination()", "def delays_requiring_compensation(arrival_delay, departure_delay):\n count = 0\n if (arrival_delay/60.0 >= 3) | (departure_delay/60.0 >= 2):\n # If arrival delay is 3+ hours, or if departure delay is 2+ hours:\n count += 1\n return count", "def gasStation():\n RANDOM_SEED = 42\n GAS_STATION_SIZE = 500 # liters\n THRESHOLD = 10 # Threshold for calling the tank truck (in %)\n FUEL_TANK_SIZE = 50 # liters\n FUEL_TANK_LEVEL = [5, 25] # Min/max levels of fuel tanks (in liters)\n REFUELING_SPEED = 2 # liters / second\n TANK_TRUCK_TIME = 300 # Seconds it takes the tank truck to arrive\n T_INTER = [15, 300] # Create a car every [min, max] seconds\n SIM_TIME = 20000 # Simulation time in seconds\n\n\n def car(name, env, gas_station, fuel_pump):\n \"\"\"\n A car arrives at the gas station for refueling.\n\n It requests one of the gas station's fuel pumps and tries to get the\n desired amount of gas from it. 
If the stations reservoir is\n depleted, the car has to wait for the tank truck to arrive.\n\n \"\"\"\n fuel_tank_level = random.randint(*FUEL_TANK_LEVEL)\n\n txt = ('%s arriving at gas station at %.1f' % (name, env.now)).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n\n with gas_station.request() as req:\n start = env.now\n # Request one of the gas pumps\n yield req\n\n # Get the required amount of fuel\n liters_required = FUEL_TANK_SIZE - fuel_tank_level\n yield fuel_pump.get(liters_required)\n\n # The \"actual\" refueling process takes some time\n yield env.timeout(liters_required / REFUELING_SPEED)\n\n txt = ('%s finished refueling in %.1f seconds' % (name, env.now - start)).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n\n def gas_station_control(env, fuel_pump):\n \"\"\"\n Periodically check the level of the *fuel_pump* and call the tank\n truck if the level falls below a threshold.\n \"\"\"\n while True:\n if fuel_pump.level / fuel_pump.capacity * 100 < THRESHOLD:\n # We need to call the tank truck now!\n txt = ('Calling tank truck at %d' % env.now).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n # Wait for the tank truck to arrive and refuel the station\n yield env.process(tank_truck(env, fuel_pump))\n\n yield env.timeout(10) # Check every 10 seconds\n\n\n def tank_truck(env, fuel_pump):\n \"\"\"\n Arrives at the gas station after a certain delay and refuels it.\n \"\"\"\n yield env.timeout(TANK_TRUCK_TIME)\n\n txt = ('Tank truck arriving at time %d' % env.now).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n\n ammount = fuel_pump.capacity - fuel_pump.level\n\n txt = ('Tank truck refuelling %.1f liters' % ammount).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n\n yield fuel_pump.put(ammount)\n\n\n def car_generator(env, gas_station, fuel_pump):\n \"\"\"\n Generate new cars that arrive at the gas station.\n \"\"\"\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(car('Car %d' % i, env, gas_station, fuel_pump))\n\n # Setup and start the simulation\n txt = ('Gas Station Refuelling Simulation Started at %s' % datetime.datetime.now()).encode()\n producer.send(\"gasStation\", txt)\n #producer.send(\"gasStation\", {'msg': txt})\n\n random.seed(RANDOM_SEED)\n\n # Create environment and start processes\n env = simpy.rt.RealtimeEnvironment(factor=0.05)\n gas_station = simpy.Resource(env, 2)\n fuel_pump = simpy.Container(env, GAS_STATION_SIZE, init=GAS_STATION_SIZE)\n env.process(gas_station_control(env, fuel_pump))\n env.process(car_generator(env, gas_station, fuel_pump))\n\n # Execute\n env.run(until=SIM_TIME)", "def updateOneService(self, reservation):\n # Adds information to the new service\n self.setServiceClient(reservation.getReservClient())\n\n # checks if it's going to be a delay, that is, if the driver/vehicle is not available at the requested time\n self.calculateDepartAndArrivalHour(reservation)\n\n self.setServiceCircuit(reservation.getReservCircuit())\n self.setServiceCircuitKms(reservation.getReservCircuitKms())\n\n # Calculates how much work time is left for the driver after this service\n duration = reservation.duration()\n new_accumulated_hours = self.getAccumTime().add(duration)\n allowed_time_left = Driver.TIMELimit.diff(new_accumulated_hours)\n\n # Calculates how much kms are left fot the vehicle after 
this service\n new_accumulated_kms = int(self.getVehicleKmsDone()) + int(self.getServiceCircuitKms())\n allowed_kms_left = int(self.getVehicleAutonomy()) - new_accumulated_kms\n\n # set common parameters\n self.setAccumTime(new_accumulated_hours)\n self.setVehicleKmsDone(new_accumulated_kms)\n\n # Adds the rest of the information, depending on the allowed time and kms left\n if allowed_time_left < Driver.TIMEThreshold:\n self.setServiceDriverStatus(Driver.STATUSTerminated)\n\n elif allowed_kms_left < Vehicle.AUTONThreshold:\n self.setServiceDriverStatus(Driver.STATUSCharging)\n self.setServiceCircuitKms(reservation.getReservCircuitKms())\n\n else:\n self.setServiceDriverStatus(Driver.STATUSStandBy)\n\n self.setVehicleAutonomy(self.getVehicleAutonomy())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One server. Two classes of customer, 0 and 1, with 0 a higher priority than 1. Only Class 1 arrives, every 2 time units. All classes have service distribution Deterministic 2.5. Class 1 turns into Class 0 after waiting 1.2 time units.
def test_preemptive_priorities_at_class_change(self):
    # First without preemption:
    N = ciw.create_network(
        arrival_distributions={
            'Class 0': [ciw.dists.NoArrivals()],
            'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},
        service_distributions={
            'Class 0': [ciw.dists.Deterministic(2.5)],
            'Class 1': [ciw.dists.Deterministic(2.5)]},
        number_of_servers=[1],
        priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),
        class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]
    )
    Q = ciw.Simulation(N, exact=26)
    Q.simulate_until_max_time(20)
    recs = Q.get_all_records()
    recs.sort(key=lambda r: r.arrival_date)
    self.assertEqual(float(recs[0].arrival_date), 2)
    self.assertEqual(float(recs[1].arrival_date), 4)
    self.assertEqual(float(recs[2].arrival_date), 6)
    self.assertEqual(float(recs[3].arrival_date), 8)
    self.assertEqual(float(recs[4].arrival_date), 10)
    self.assertEqual(float(recs[0].waiting_time), 0)
    self.assertEqual(float(recs[1].waiting_time), 0.5)
    self.assertEqual(float(recs[2].waiting_time), 1)
    self.assertEqual(float(recs[3].waiting_time), 1.5)
    self.assertEqual(float(recs[4].waiting_time), 2)
    self.assertEqual(float(recs[0].service_start_date), 2)
    self.assertEqual(float(recs[1].service_start_date), 4.5)
    self.assertEqual(float(recs[2].service_start_date), 7)
    self.assertEqual(float(recs[3].service_start_date), 9.5)
    self.assertEqual(float(recs[4].service_start_date), 12)
    self.assertEqual(float(recs[0].service_end_date), 4.5)
    self.assertEqual(float(recs[1].service_end_date), 7)
    self.assertEqual(float(recs[2].service_end_date), 9.5)
    self.assertEqual(float(recs[3].service_end_date), 12)
    self.assertEqual(float(recs[4].service_end_date), 14.5)

    # Now with preemption:
    N = ciw.create_network(
        arrival_distributions={
            'Class 0': [ciw.dists.NoArrivals()],
            'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},
        service_distributions={
            'Class 0': [ciw.dists.Deterministic(2.5)],
            'Class 1': [ciw.dists.Deterministic(2.5)]},
        number_of_servers=[1],
        priority_classes=({'Class 0': 0, 'Class 1': 1}, ["resample"]),
        class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]
    )
    Q = ciw.Simulation(N, exact=26)
    Q.simulate_until_max_time(20)
    all_recs = Q.get_all_records()
    recs = [r for r in all_recs if r.record_type == 'service']
    recs.sort(key=lambda r: r.arrival_date)
    self.assertEqual(float(recs[0].arrival_date), 2)
    self.assertEqual(float(recs[1].arrival_date), 4)
    self.assertEqual(float(recs[2].arrival_date), 6)
    self.assertEqual(float(recs[3].arrival_date), 8)
    self.assertEqual(float(recs[4].arrival_date), 10)
    self.assertEqual(float(recs[0].waiting_time), 0)
    self.assertEqual(float(recs[1].waiting_time), 0.5)
    self.assertEqual(float(recs[2].waiting_time), 5.7)
    self.assertEqual(float(recs[3].waiting_time), 1.2)
    self.assertEqual(float(recs[4].waiting_time), 4.2)
    self.assertEqual(float(recs[0].service_start_date), 2)
    self.assertEqual(float(recs[1].service_start_date), 4.5)
    self.assertEqual(float(recs[2].service_start_date), 11.7)
    self.assertEqual(float(recs[3].service_start_date), 9.2)
    self.assertEqual(float(recs[4].service_start_date), 14.2)
    self.assertEqual(float(recs[0].service_end_date), 4.5)
    self.assertEqual(float(recs[1].service_end_date), 7)
    self.assertEqual(float(recs[2].service_end_date), 14.2)
    self.assertEqual(float(recs[3].service_end_date), 11.7)
    self.assertEqual(float(recs[4].service_end_date), 16.7)

    # Test interrupted service data records
    interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']
    self.assertEqual(len(interrupted_recs), 1)
    self.assertEqual(float(interrupted_recs[0].arrival_date), 6)
    self.assertEqual(float(interrupted_recs[0].service_start_date), 7)
    self.assertEqual(float(interrupted_recs[0].waiting_time), 1)
    self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)
    self.assertEqual(float(interrupted_recs[0].service_time), 2.5)
    self.assertTrue(isnan(interrupted_recs[0].service_end_date))
[ "def test_class_change_while_waiting(self):\n N = ciw.create_network(\n arrival_distributions={'Class 0': [ciw.dists.Deterministic(3)],\n 'Class 1': [ciw.dists.NoArrivals()]},\n service_distributions={'Class 0': [ciw.dists.Deterministic(4.5)],\n 'Class 1': [ciw.dists.Deterministic(4.5)]},\n number_of_servers=[1],\n class_change_time_distributions=[\n [None, ciw.dists.Deterministic(4)],\n [None, None]]\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(26)\n recs = Q.get_all_records()\n self.assertEqual(len(recs), 5)\n # Customer 1\n self.assertEqual(recs[0].arrival_date, 3)\n self.assertEqual(recs[0].waiting_time, 0)\n self.assertEqual(recs[0].service_start_date, 3)\n self.assertEqual(recs[0].service_end_date, 7.5)\n self.assertEqual(recs[0].customer_class, 0)\n self.assertEqual(recs[0].original_customer_class, 0)\n # Customer 2\n self.assertEqual(recs[1].arrival_date, 6)\n self.assertEqual(recs[1].waiting_time, 1.5)\n self.assertEqual(recs[1].service_start_date, 7.5)\n self.assertEqual(recs[1].service_end_date, 12)\n self.assertEqual(recs[1].customer_class, 0)\n self.assertEqual(recs[1].original_customer_class, 0)\n # Customer 3\n self.assertEqual(recs[2].arrival_date, 9)\n self.assertEqual(recs[2].waiting_time, 3)\n self.assertEqual(recs[2].service_start_date, 12)\n self.assertEqual(recs[2].service_end_date, 16.5)\n self.assertEqual(recs[2].customer_class, 0)\n self.assertEqual(recs[2].original_customer_class, 0)\n # Customer 4\n self.assertEqual(recs[3].arrival_date, 12)\n self.assertEqual(recs[3].waiting_time, 4.5)\n self.assertEqual(recs[3].service_start_date, 16.5)\n self.assertEqual(recs[3].service_end_date, 21)\n self.assertEqual(recs[3].customer_class, 1)\n self.assertEqual(recs[3].original_customer_class, 0)\n # Customer 5\n self.assertEqual(recs[4].arrival_date, 15)\n self.assertEqual(recs[4].waiting_time, 6)\n self.assertEqual(recs[4].service_start_date, 21)\n self.assertEqual(recs[4].service_end_date, 25.5)\n self.assertEqual(recs[4].customer_class, 1)\n self.assertEqual(recs[4].original_customer_class, 0)", "def custom_server_priority(srv, ind):\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]", "def test_records_correct_server_id(self):\n def custom_server_priority(srv, ind):\n \"\"\"\n A custom server priority function that priortises server 1 for \n customer class 0 and server 2 for customer class 1.\n \"\"\"\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]\n\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=1.0)], 'Class 1': [ciw.dists.Exponential(rate=1.0)]\n },\n service_distributions={\n 'Class 0': [ciw.dists.Exponential(rate=200.0)], 'Class 1': [ciw.dists.Exponential(rate=200.0)]\n },\n number_of_servers=[2],\n server_priority_functions=[custom_server_priority],\n )\n ciw.seed(0)\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(50)\n\n all_class_0_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n all_class_1_correct = all([rec.server_id == 1 for rec in Q.get_all_records() if rec.customer_class == 0])\n\n self.assertTrue(all_class_0_correct)\n self.assertTrue(all_class_1_correct)", "def test_server_priority_function_two_nodes(self):\n def prioritise_less_busy(srv, ind):\n return srv.busy_time\n\n def 
prioritise_highest_id(srv, ind):\n return -srv.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1), ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2), ciw.dists.Exponential(2)],\n number_of_servers=[2, 2],\n routing=[[0, 0], [0, 0]],\n server_priority_functions=[prioritise_less_busy, prioritise_highest_id]\n )\n )\n Q.simulate_until_max_time(1000)\n expected_times_node_1 = [256.2457715650031, 257.59339967047254]\n expected_times_node_2 = [157.35577182806387, 356.41473247082365]\n\n for i, (srv_1, srv_2) in enumerate(zip(Q.nodes[1].servers, Q.nodes[2].servers)):\n self.assertEqual(srv_1.busy_time, expected_times_node_1[i])\n self.assertEqual(srv_2.busy_time, expected_times_node_2[i])", "def test_server_priority_function_allocate_to_last_server_first(self):\n def get_server_busy_time(server, ind):\n return -server.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [158.68745586286119, 331.0719836410557]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def test_server_priority_function_allocate_to_less_busy(self):\n def get_server_busy_time(server, ind):\n return server.busy_time\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_distributions=[ciw.dists.Exponential(2)],\n number_of_servers=[2],\n server_priority_functions=[get_server_busy_time]\n )\n )\n Q.simulate_until_max_time(1000)\n\n expected_times = [245.07547532640024, 244.68396417751663]\n for i, srv in enumerate(Q.nodes[1].servers):\n self.assertEqual(srv.busy_time, expected_times[i])", "def __init__(self, num_clients, num_servers, iterations):\n\n global len_per_iteration\n\n digits = load_digits() # using sklearn's MNIST dataset\n X, y = digits.data, digits.target\n\n scaler = MinMaxScaler()\n scaler.fit(X)\n X = scaler.transform(X)\n\n X_train, X_test = X[:-config.LEN_TEST], X[-config.LEN_TEST:]\n y_train, y_test = y[:-config.LEN_TEST], y[-config.LEN_TEST:]\n\n # extract only amount that we require\n number_of_samples = 0\n for client_name in config.client_names:\n len_per_iteration = config.LENS_PER_ITERATION[client_name]\n number_of_samples += len_per_iteration * iterations\n\n X_train = X_train[:number_of_samples]\n y_train = y_train[:number_of_samples]\n\n client_to_datasets = data_formatting.partition_data(X_train, y_train, config.client_names, iterations,\n config.LENS_PER_ITERATION, cumulative=config.USING_CUMULATIVE)\n\n #print_config(len_per_iteration=config.LEN_PER_ITERATION)\n print('\\n \\n \\nSTARTING SIMULATION \\n \\n \\n')\n\n active_clients = {'client_agent' + str(i) for i in range(num_clients)}\n self.clients = {\n 'client_agent' + str(i): ClientAgent(agent_number=i,\n train_datasets=client_to_datasets['client_agent' + str(i)],\n evaluator=ModelEvaluator(X_test, y_test),\n active_clients=active_clients) for i in\n range(num_clients)} # initialize the agents\n\n self.server_agents = {'server_agent' + str(i): ServerAgent(agent_number=i) for i in\n range(num_servers)} # initialize servers\n\n # create directory with mappings from names to instances\n self.directory = Directory(clients=self.clients, server_agents=self.server_agents)\n\n for 
agent_name, agent in self.clients.items():\n agent.set_directory(self.directory)\n agent.initializations()\n for agent_name, agent in self.server_agents.items():\n agent.set_directory(self.directory)\n\n # OFFLINE diffie-helman key exchange\n # NOTE: this is sequential in implementation, but simulated as occuring parallel\n if config.USE_SECURITY:\n key_exchange_start = datetime.datetime.now() # measuring how long the python script takes\n max_latencies = []\n for client_name, client in self.clients.items():\n # not including logic of sending/receiving public keys in latency computation since it is nearly zero\n client.send_pubkeys()\n max_latency = max(config.LATENCY_DICT[client_name].values())\n max_latencies.append(max_latency)\n simulated_time = max(max_latencies)\n\n key_exchange_end = datetime.datetime.now() # measuring runtime\n key_exchange_duration = key_exchange_end - key_exchange_start\n simulated_time += key_exchange_duration\n if config.SIMULATE_LATENCIES:\n print(\n 'Diffie-helman key exchange simulated duration: {}\\nDiffie-helman key exchange real run-time: {}\\n'.format(\n simulated_time, key_exchange_duration))\n\n for client_name, client in self.clients.items():\n client.initialize_common_keys()", "def gbp_crud_policy_classifier(self):\n try:\n self.__set_tenant_info()\n self.tc_id = inspect.stack()[0][3]\n # initialize result dict.\n self.__set_result_dict(self.result_dict.keys(), \"\")\n\n protocol_list = [\"tcp\", \"udp\", \"icmp\"]\n direction = [\"in\", \"out\", \"bi\"]\n port_range = [\"80\", \"80:13001\"]\n classifier_id_list = []\n\n LOG_OBJ.debug(\"#################################################\")\n LOG_OBJ.debug(\"Starting Test Case : %s\" % self.tc_id)\n LOG_OBJ.debug(\"#################################################\")\n\n tcreason = \"\"\n\n # tenant creation.\n tenant_details = self.__create_tenant_common()\n if not isinstance(tenant_details, tuple):\n tcreason = tenant_details\n self.__set_result_dict(self.result_dict.keys(), tcreason)\n LOG_OBJ.error(tcreason)\n return\n\n # update class objects with new tenant token\n tenant_id = tenant_details[0]\n self.gbp_obj.token = tenant_details[1]\n\n # create policy classifier\n for protocol in protocol_list:\n for dire in direction:\n for port in port_range:\n policy_classifier_name = \"classifier_\" + \"_\" +\\\n protocol + \"_\" + dire + \"_\" + port\n classifier_direction = dire\n classifier_protocol = protocol\n classifier_port = port\n LOG_OBJ.debug(\"Creating policy classifier for \"\n \"protocol=%s, direction=%s, port=%s\"\n % (protocol, dire, port))\n classifier_info = self.gbp_obj.\\\n create_policy_classifier(\n policy_classifier_name,\n direction=classifier_direction,\n protocol=classifier_protocol,\n port_range=classifier_port\n )\n\n if not isinstance(classifier_info, dict):\n tcreason = \"Failed to create Policy classifier\"\\\n \" in %s tenant for %s protocol, %s \"\\\n \"direction %s port\" % (tenant_id, protocol,\n dire, port)\n LOG_OBJ.error(tcreason)\n self.__set_result_dict(self.result_dict.keys(),\n tcreason)\n return\n policy_classifier_id = classifier_info[\"id\"]\n classifier_id_list.append(policy_classifier_id)\n LOG_OBJ.debug(\"Created policy classifier successfully \"\n \"with id : %s\" % policy_classifier_id)\n\n # show policy classifier.\n classifier_info = self.gbp_obj.show_policy_classifier(\n classifier_id=str(policy_classifier_id))\n if not isinstance(classifier_info, dict):\n tcreason = \"Failed to show details of policy classifier\"\\\n \": %s\" % policy_classifier_id\n 
LOG_OBJ.error(tcreason)\n self.__set_result_dict([\"show\"], tcreason)\n else:\n LOG_OBJ.debug(\"Show policy classifier details successful.\")\n\n # list policy classifier.\n classifier_list = self.gbp_obj.list_policy_classifier()\n if not isinstance(classifier_list, list):\n tcreason = \"failed to list policy classifiers of %s tenant.\"\n LOG_OBJ.error(tcreason)\n self.__set_result_dict([\"list\"], tcreason)\n else:\n LOG_OBJ.debug(\"Successfully listed policy classifier in \"\n \"%s tenant\" % tenant_id)\n\n # update policy classifier.\n updated_classifier_name = \"updated_policy_classifier\"\n updated_classifier_description = \"updated policy description\"\n updated_classifier_info = self.gbp_obj.update_policy_classifier(\n policy_classifier_id, name=updated_classifier_name,\n description=updated_classifier_description\n )\n if not isinstance(updated_classifier_info, dict):\n tcreason = \"Failed to update policy classifier: \"\\\n \"%s\" % policy_classifier_id\n LOG_OBJ.error(\"Failed to update policy classifier: \"\n \"%s\" % policy_classifier_id)\n self.__set_result_dict([\"update\"], tcreason)\n elif updated_classifier_info[\"name\"] != updated_classifier_name\\\n and updated_classifier_info[\"description\"] !=\\\n updated_classifier_description:\n tcreason = \"Failed to update policy classifier: \"\\\n \"%s\" % policy_classifier_id\n LOG_OBJ.error(\"Failed to update policy classifier: \"\n \"%s\" % policy_classifier_id)\n self.__set_result_dict([\"update\"], tcreason)\n else:\n LOG_OBJ.debug(\"Successfully updated policy classifier:\"\n \" %s\" % policy_classifier_id)\n\n # delete policy classifier.\n status = self.gbp_obj.delete_policy_classifier(\n policy_classifier_id)\n if not isinstance(status, bool):\n tcreason = \"Failed to delete policy classifier:\"\\\n \" %s\" % policy_classifier_id\n LOG_OBJ.error(tcreason)\n self.__set_result_dict([\"delete\"], tcreason)\n return\n\n LOG_OBJ.debug(\"Successfully deleted policy classifier:\"\n \" %s\" % policy_classifier_id)\n except Exception as err:\n LOG_OBJ.exception(err)\n tcreason = \"Some problem occurred while policy classifier \"\\\n \"api validation.\"\n self.__set_result_dict(self.result_dict.keys(), tcreason)\n finally:\n # cleaning test resources.\n status = self.__resource_cleanup()\n if not isinstance(status, bool):\n tcreason = \"ERROR: Some problem occurred while \"\\\n \"cleaning resources.\"\n LOG_OBJ.error(tcreason)\n self.__set_result_dict(self.result_dict.keys(), tcreason)\n # update result file with test case result.\n self.__update_result_file()", "def class_calc(self):\n if self.char_class == 0:\n self.wallet += 150\n self.physical += 3\n self.attack_bonus += 1\n self.damage_bonus += 1\n elif self.char_class == 1:\n self.wallet += 125\n self.subterfuge += 3\n elif self.char_class == 2:\n self.wallet += 75\n self.knowledge += 3\n elif self.char_class == 3:\n self.wallet += 125\n self.communication += 3", "def test_state_server_dependent_model_server_attributes():\n num_of_servers = 4\n system_capacity = 10\n buffer_capacity = 10\n\n rates = {}\n for server in range(1, num_of_servers + 1):\n rates[server] = {\n (u, v): 1\n for u in range(buffer_capacity + 1)\n for v in range(system_capacity + 1)\n }\n\n simulation = simulate_model(\n lambda_2=2,\n lambda_1=2,\n mu=rates,\n num_of_servers=num_of_servers,\n threshold=8,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n seed_num=0,\n runtime=100,\n )\n\n for server in simulation.nodes[2].servers:\n assert hasattr(server, \"served_inds\")\n assert 
hasattr(server, \"service_times\")", "def _create_classifier(self, message = None):\n c = Classifier()\n min_specifity = 10\n total_str = 0\n if self.classifiers:\n for classify in self.classifiers:\n total_str += classify.strength\n average_str = total_str / len(self.classifiers)\n c.strength = average_str\n else:\n c.strength = 100\n\n # Set condition(s)\n elements = range(len(Message.game_msg_index))\n random.shuffle(elements)\n elements_specifity = [ random.randint(1,5) for x in xrange(len(Message.game_msg_index))]\n c.specifity = 0\n\n # Create the condition\n for i,value in enumerate(elements):\n c.specifity += 6 - elements_specifity[i]\n c.conditions[0][value] = \\\n random.sample(xrange(0,5),elements_specifity[i] )\n\n # Ensure message's value is in the condition\n if ( message != None ):\n if ( message.status[value] not in c.conditions[0][value] ):\n c.conditions[0][value][0] = message.status[value]\n\n c.conditions[0][value].sort()\n\n # Stop adding to the condition sometime after you have > min_specifity\n if ( c.specifity > min_specifity ):\n if ( random.random() < float(i) / float(len(Message.game_msg_index) ) ):\n break\n if ( random.random() < 0.01 and c.specifity < min_specifity * 0.3 ):\n c.conditions.append( [ None ] * len( Message.game_msg_index) )\n break\n\n if ( len(c.conditions) == 2 ):\n elements = range(len(Message.game_msg_index))\n random.shuffle(elements)\n elements_specifity = [ random.randint(1,5) for x in xrange(len(Message.game_msg_index))]\n # Create the condition\n for i,value in enumerate(elements):\n c.specifity += 6 - elements_specifity[i]\n c.conditions[1][value] = \\\n random.sample(xrange(0,5),elements_specifity[i] )\n\n # Ensure message's value is in the condition\n if ( message != None ):\n if ( message.status[value] not in c.conditions[1][value] ):\n c.conditions[1][value][0] = message.status[value]\n\n c.conditions[1][value].sort()\n\n # Stop adding to the condition sometime after you have > min_specifity\n if ( c.specifity > min_specifity ):\n if ( random.random() < float(i) / float(len(Message.game_msg_index) ) ):\n break\n if ( random.random() < 0.01 and c.specifity < min_specifity * 0.3 ):\n c.conditions.append( [ None ] * len( Message.game_msg_index) )\n break\n\n # Set output\n output_message = Message()\n output_message.classifier_message()\n output_message.emitter = c\n c.output = output_message, random.choice(['Heal','Mine','Attack','Wait'])\n return c", "def broad_cast(self):\n item_factor = self.server_model.get_item_factor()\n for client in self.clients:\n client.recv_item_factor(item_factor)", "def service_partnership(company_name: str, choice: int):\n sp = [True]\n i = 1\n weight = 0\n if choice == 3 and company_name.lower() == 'huawei':\n return 135\n while i < 10:\n seq = bool(random.getrandbits(1))\n\n i += 1\n\n if seq and sp[-1] is False:\n weight += 135\n\n elif seq is False and sp[-1] is False:\n weight += -22\n\n elif seq is False and sp[-1] is True:\n weight += -68\n sp.append(seq)\n return weight", "def __init__(self, checkBilling=False, estimatedLoad=0.0, ratedPower=0.0, servicePriority='', grounded=False, serviceDeliveryRemark='', phaseCode=\"s12N\", ctptReference=0, ratedCurrent=0.0, ratedVoltage=0.0, SDPLocations=None, ServiceLocation=None, EndDevices=None, MeterReadings=None, ServiceCategory=None, PricingStructures=None, EnergyConsumer=None, ServiceSupplier=None, CustomerAgreement=None, TransformerTanks=None, *args, **kw_args):\n #: True if as a result of an inspection or otherwise, there is a reason to suspect that a 
previous billing may have been performed with erroneous data. Value should be reset once this potential discrepancy has been resolved.\n self.checkBilling = checkBilling\n\n #: Estimated load.\n self.estimatedLoad = estimatedLoad\n\n #: Power that this service delivery point is configured to deliver.\n self.ratedPower = ratedPower\n\n #: Priority of service for this service delivery point. Note that service delivery points at the same service location can have different priorities.\n self.servicePriority = servicePriority\n\n #: True if grounded.\n self.grounded = grounded\n\n #: Remarks about this service delivery point, for example the reason for it being rated with a non-nominal priority.\n self.serviceDeliveryRemark = serviceDeliveryRemark\n\n #: Phase code. Number of wires and number of phases can be deduced from enumeration literal values. For example, ABCN is three-phase, four-wire. s12n (splitSecondary12N) is single-phase, three-wire. s1n and s2n are single-phase, two-wire. Values are: \"s12N\", \"BN\", \"BC\", \"ABN\", \"s2N\", \"N\", \"ACN\", \"BCN\", \"ABCN\", \"AC\", \"s1N\", \"AN\", \"B\", \"AB\", \"C\", \"A\", \"CN\", \"ABC\"\n self.phaseCode = phaseCode\n\n #: (optional for medium voltage connections) Reference to the low side terminal of a CT or PT that obtain readings from a medium or high voltage point.\n self.ctptReference = ctptReference\n\n #: Current that this service delivery point is configured to deliver.\n self.ratedCurrent = ratedCurrent\n\n #: Nominal service voltage.\n self.ratedVoltage = ratedVoltage\n\n self._SDPLocations = []\n self.SDPLocations = [] if SDPLocations is None else SDPLocations\n\n self._ServiceLocation = None\n self.ServiceLocation = ServiceLocation\n\n self._EndDevices = []\n self.EndDevices = [] if EndDevices is None else EndDevices\n\n self._MeterReadings = []\n self.MeterReadings = [] if MeterReadings is None else MeterReadings\n\n self._ServiceCategory = None\n self.ServiceCategory = ServiceCategory\n\n self._PricingStructures = []\n self.PricingStructures = [] if PricingStructures is None else PricingStructures\n\n self._EnergyConsumer = None\n self.EnergyConsumer = EnergyConsumer\n\n self._ServiceSupplier = None\n self.ServiceSupplier = ServiceSupplier\n\n self._CustomerAgreement = None\n self.CustomerAgreement = CustomerAgreement\n\n self._TransformerTanks = None\n self.TransformerTanks = TransformerTanks\n\n super(ServiceDeliveryPoint, self).__init__(*args, **kw_args)", "def repair_service(self,info_dict,config):\n\tcommand_dict = self._heart_beat_config[\"repair_command\"]\n for item_id in info_dict:\n item = info_dict[item_id]\n if \"class_name\" in item and item['class_name'] in command_dict:\n user = config[\"DEFAULT\"].get(\"user\")\n password = config[\"DEFAULT\"].get(\"password\")\n host = item['server_ip']\n class_name = item['class_name']\n command = command_dict[class_name]\n concurrency_expected = str(item[\"expected_concurrency\"] - item[\"actual_concurrency\"])\n try:\n self.ssh_cmd(user, host, password, command%concurrency_expected)\n except Exception as e:\n logging.error(e)\n #todo: send mail to administrator?", "def test_swarm_updates_parsed_options_when_single_userclass_specified(self):\n\n class User1(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n class User2(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n self.environment.web_ui.userclass_picker_is_active = True\n self.environment.available_user_classes = {\"User1\": User1, \"User2\": User2}\n\n response = requests.post(\n 
\"http://127.0.0.1:%i/swarm\" % self.web_port,\n data={\n \"user_count\": 5,\n \"spawn_rate\": 5,\n \"host\": \"https://localhost\",\n \"user_classes\": [\"User1\"],\n },\n )\n self.assertListEqual([\"User1\"], response.json()[\"user_classes\"])\n\n # stop\n gevent.sleep(1)\n response = requests.get(\"http://127.0.0.1:%i/stop\" % self.web_port)\n self.assertEqual(response.json()[\"message\"], \"Test stopped\")\n\n # Checking environment.parsed_options.user_classes was updated\n self.assertListEqual(self.environment.parsed_options.user_classes, [\"User1\"])", "def increment_number_served(self, customers):\n self.numbers_serverd += customers", "def class_score(self):\n pass", "def assign_class(otsession_id):\r\n return 200" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to AKS cluster. No auth as of now
def aks_connect(rg, cluster): az("aks get-credentials --resource-group {} --name {}".format(rg, cluster))
[ "def cluster_auth_aws(deployment, project, cluster, zone, service_key):\n\n subprocess.check_call(['aws', 'eks', 'update-kubeconfig',\n '--name', cluster, '--region', zone])", "def login():\n\n # Configure the default client credentials for all possible environments.\n try:\n kubernetes.config.load_incluster_config() # cluster env vars\n logger.debug(\"configured in cluster with service account\")\n except kubernetes.config.ConfigException as e1:\n try:\n kubernetes.config.load_kube_config() # developer's config files\n logger.debug(\"configured via kubeconfig file\")\n except kubernetes.config.ConfigException as e2:\n raise LoginError(f\"Cannot authenticate neither in-cluster, nor via kubeconfig.\")\n\n # Make a sample API call to ensure the login is successful,\n # and convert some of the known exceptions to the CLI hints.\n try:\n api = kubernetes.client.CoreApi()\n api.get_api_versions()\n except urllib3.exceptions.HTTPError as e:\n raise LoginError(\"Cannot connect to the Kubernetes API. \"\n \"Please configure the cluster access.\")\n except kubernetes.client.rest.ApiException as e:\n if e.status == 401:\n raise LoginError(\"Cannot authenticate to the Kubernetes API. \"\n \"Please login or configure the tokens.\")\n else:\n raise", "def cluster_connect(cluster_id):\n cluster_manager = get_cluster_manager()\n node_manager = get_node_manager()\n cluster = cluster_manager.get_cluster_by_id(cluster_id)\n nodes = cluster_manager.get_cluster_nodes_types(cluster_id)\n if cluster.cluster_config.options.ssh_to is not None:\n if cluster.cluster_config.options.ssh_to not in nodes:\n raise ValueError(\n f\"No nodes of type {cluster.cluster_config.options.ssh_to} to \"\n f\"connect\")\n nodes = node_manager.get_nodes_by_id(\n nodes[cluster.cluster_config.options.ssh_to])\n print(f\"Connecting to node: {nodes[0].node_id} \"\n f\"({cluster.cluster_config.options.ssh_to})\")\n e = ShellInvoker(nodes[0], cluster_defaults.base_defaults.private_path)\n e.run()\n else:\n nodes = cluster_manager.get_all_cluster_nodes(cluster_id)\n if not nodes:\n raise ValueError(\"No nodes in the cluster\")\n nodes = node_manager.get_nodes_by_id(nodes)\n e = ShellInvoker(nodes[0], cluster_defaults.base_defaults.private_path)\n e.run()\n return 0", "def _connect(self):\n self.session = boto3.Session(aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key,\n region_name=self.default_region)", "def connect_to_opsworks():\n try:\n aws_cfg\n except NameError:\n aws_cfg = load_aws_cfg()\n\n return boto.connect_opsworks(aws_access_key_id=aws_cfg.get(\"aws\", \"access_key_id\"),\n aws_secret_access_key=aws_cfg.get(\"aws\", \"secret_access_key\"))", "def create_ec2_connection(self): \n logger.info(\"connecting to EC2 cluster\")\n self.conn = boto.ec2.connect_to_region(self.region,aws_access_key_id = self.AWS_ACCESS_KEY_ID,aws_secret_access_key =self.AWS_SECRET_ACCESS_KEY)\n logger.info(\"connection successful\")", "def __cassandra_connect(self):\n for i in range(10):\n try:\n self.cluster = Cluster()#['panoptes-cassandra.zooniverse.org'],protocol_version = 3)\n self.cassandra_session = self.cluster.connect('zooniverse')\n return\n except cassandra.cluster.NoHostAvailable:\n pass\n\n assert False", "def can_connect_to_cluster():\n url = 'https://travelimperial.azurehdinsight.net/templeton/v1/status'\n resp = requests.get(url, auth=(CLUSTER_USER, CLUSTER_PASS))\n print(resp.status_code)\n return (resp.status_code == 200)", "def __connect_with_credentials(self):\n\t\tself.client_.username_pw_set(\"xgvutxaa\", 
\"9cMIpVoL4Ujj\")\n\t\tself.client_.connect('spectacular-pharmacist.cloudmqtt.com',1883,3600)", "def try_connecting(username='', password=''):\n\n if username and password:\n ap = AuthenticationTests.get_authentication_provider(username, password)\n else:\n ap = None\n\n maxwait = 120 # in seconds\n sleeptime = 1\n\n wait_time = 0\n while wait_time < maxwait:\n try:\n cluster = Cluster(protocol_version=tests.integration.PROTOCOL_VERSION, auth_provider=ap)\n cluster.connect()\n log.debug(\"Can connect after %d seconds\" % wait_time)\n return True\n except Exception:\n wait_time += sleeptime\n time.sleep(sleeptime)\n\n return False", "def connect_to_consul():\n try:\n payload = \"\"\n headers = {'cache-control': 'no-cache'}\n print(\"Connecting to Consul backend\")\n response = requests.request(\"GET\", consul_url, data=payload, headers=headers)\n print(\"Retrieving KV for vault...\")\n print(\"Value for Key '{}': {}\".format(consul_kv_key, response.text))\n except requests.exceptions.RequestException as e:\n print(e)", "async def connect(cls):\n LOGGER.info(f'Create connection with zookeeper host %s and port %s', Configs['ZOOKEEPER_HOST'], Configs['ZOOKEEPER_PORT'])\n cls._connection = aiozk.ZKClient(f\"{Configs['ZOOKEEPER_HOST']}:{Configs['ZOOKEEPER_PORT']}\")\n while True:\n try:\n await cls._connection.start()\n break\n except Exception as e:\n LOGGER.error('Issue with zookeeper connection %s and try reconnect every 3 sec', e)\n await asyncio.sleep(3)", "def connect(self):\n mongo_cluster_data = open('db_config.json').read()\n data = json.loads(mongo_cluster_data)\n self.logger.info('connecting to database.')\n self._conn = pymongo.MongoClient(host=data['SRVAdd'])\n self.logger.info('connected to database.')", "def test_07_deploy_kubernetes_ha_cluster(self):\n if self.setup_failed == True:\n self.fail(\"Setup incomplete\")\n if self.default_network:\n self.skipTest(\"HA cluster on shared network requires external ip address, skipping it\")\n global k8s_cluster\n k8s_cluster = self.getValidKubernetesCluster(1, 2)\n self.debug(\"HA Kubernetes cluster with ID: %s successfully deployed\" % k8s_cluster.id)\n return", "def cli_cosmosdb_managed_cassandra_cluster_start(client,\n resource_group_name,\n cluster_name):\n\n return client.begin_start(resource_group_name, cluster_name)", "def test_dcos_client_api(mock_dcos_client):\n args = dcos_api.DcosApiSession.get_args_from_env()\n args['auth_user'] = None\n cluster = dcos_api.DcosApiSession(**args)\n # no assert necessary, just make sure that this function signatures works\n r = cluster.get('', node='123.123.123.123')\n r.raise_for_status()\n cluster.get('')\n cluster.post('')\n cluster.put('')\n cluster.delete('')\n cluster.head('')\n cluster.patch('')\n cluster.options('')", "def cluster_auth(deployment):\n config = get_config(deployment)\n\n if 'cluster' in config:\n cluster = config['cluster']\n provider = cluster.get('provider')\n if provider == 'gcloud':\n cluster_auth_gcloud(\n deployment, **cluster['gcloud']\n )\n elif provider == 'aws':\n cluster_auth_aws(\n deployment, **cluster['aws']\n )\n else:\n raise ValueError(\n f'Unknown provider {provider} found in hubploy.yaml')", "def __init__(self, keyspace = 'casi_test1', ip = '127.0.0.1'):\n ips = []\n ips.append(ip)\n self.cluster = Cluster(ips)\n self.session = self.cluster.connect(keyspace)\n self.session.row_factory = named_tuple_factory", "def cassandra_connection():\n cluster = Cluster(['127.0.0.1'], port=9042)\n session = cluster.connect()\n session.execute(\"\"\"CREATE KEYSPACE IF 
NOT EXISTS songs WITH REPLICATION ={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\")\n session.set_keyspace('songs')\n return session, cluster" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add helm repo to the attached k8 cluster
def helm_repo_add(name): subprocess.call(["helm", "repo", "add", name+"-stable","https://syashfr.github.io/"+name])
[ "def setup_helm():\n subprocess.check_output([\n 'helm', 'init', '--upgrade',\n ])\n # wait for tiller to come up\n subprocess.check_call([\n 'kubectl', 'rollout', 'status',\n '--namespace', 'kube-system',\n '--watch', 'deployment', 'tiller-deploy',\n ])", "def helm_add_chart_repos(repos):\n for repo_name in repos:\n repo_url = repos[repo_name]\n logging.info(\"Adding Helm Chart Repo {0} at {1}\".format(repo_name, repo_url))\n helm_add_chart_repo(repo_name, repo_url)", "def install(name):\n repo = TemplateRepository()\n command = run(\n \"kubectl.exe apply -f %s\" % repo.url(name), capture_output=True, shell=True\n )\n click.echo(command.stdout.decode())", "def deploy_chartmuseum():\n logger.info(\"Start to deploy chartmuseum\")\n cmd = \"helm repo add test https://chartmuseum.github.io/charts\"\n check_output(cmd, shell=True).decode('utf-8')\n cmd = \"kubectl create namespace test\"\n check_output(cmd, shell=True).decode('utf-8')\n\n cmd = \"helm install test test/chartmuseum --version 3.1.0 --namespace test --set env.open.DISABLE_API=false\"\n check_output(cmd, shell=True).decode('utf-8')\n wait(lambda: is_chartmuseum_up(), sleep_seconds=10, timeout_seconds=60, waiting_for=\"chartmuseum to be ready\")\n\n time.sleep(10)\n chartmuseum_url = subprocess.run(\"kubectl get services -n test | grep test-chartmuseum | awk '{print $3}'\", shell=True, check=True, stdout=subprocess.PIPE).stdout.decode('utf-8').strip()+\":8080\"\n cmd = f\"curl -X POST --data-binary @{dname}/resources/cl-test-helm-chart/oru-app-1.0.0.tgz http://{chartmuseum_url}/api/charts\"\n check_output(cmd, shell=True).decode('utf-8')\n cmd = f\"curl -X POST --data-binary @{dname}/resources/cl-test-helm-chart/odu-app-1.0.0.tgz http://{chartmuseum_url}/api/charts\"\n check_output(cmd, shell=True).decode('utf-8')\n cmd = f\"curl -X POST --data-binary @{dname}/resources/cl-test-helm-chart/odu-app-ics-version-1.0.0.tgz http://{chartmuseum_url}/api/charts\"\n check_output(cmd, shell=True).decode('utf-8')", "def install_backend(self, name, image, config_file, healthcheck_path='/',\n root_path='/', namespace='default', replicas=2,\n force=False):\n\n try:\n config_data = yaml.load(open(config_file))\n print(\"Loading config file: {}\".format(config_file))\n except yaml.YAMLError as e:\n print('Config file should be valid JSON or YAML, error: {}'.format(e))\n except Exception as e:\n print(\"Unable to read config file, error: {}\".format(e))\n sys.exit(1)\n\n api_client = _get_k8s_api_client()\n api = client.VersionApi(api_client)\n core_v1 = client.CoreV1Api(api_client)\n ext_v1 = client.ExtensionsV1beta1Api(api_client)\n\n try:\n api.get_code()\n print(\"Connected to cluster - {}\".format(api_client.host))\n except Exception as e:\n print(\"Unable to communicate with k8s cluster, error: {}\".format(e))\n sys.exit(1)\n\n try:\n _create_secret(name, config_data, core_v1, namespace, force)\n print(\"Create config [ok]\")\n except Exception as e:\n print(\"Can't create config in cluster, error: {}\".format(e))\n sys.exit(1)\n\n try:\n _create_deployment(name, image, ext_v1, healthcheck_path, replicas,\n namespace, force, core_api=core_v1)\n print(\"Create deployment [ok]\")\n except Exception as e:\n print(\"Can't create deployment in cluster, error: {}\".format(e))\n sys.exit(1)\n\n try:\n _create_service(name, core_v1, namespace, force)\n print(\"Create service [ok]\")\n except Exception as e:\n print(\"Can't create deployment in cluster, error: {}\".format(e))\n sys.exit(1)\n\n print(\"Checking service availability\")\n\n try:\n ip = 
_polling_service_access(name, core_v1, namespace, timeout=180)\n print(\"Expose service [ok]\")\n print(\"Connector backend - http://{}/{}\".format(ip, root_path.lstrip('/')))\n except Exception as e:\n print(\"Service expose FAILED, error: {}\".format(e))\n sys.exit(1)\n\n print(\"[Success]\")", "def helmdocs(ctx, env=\"container\"):\n version = \"1.10.0\"\n cmd = \"helm-docs\"\n\n if env == \"container\":\n run(\"docker run --rm -v $(git rev-parse --show-toplevel):/app -w /app jnorwood/helm-docs:v{} {}\".format(version, cmd), echo=True)\n elif env == \"host\":\n run(cmd)\n else:\n raise Exit(message=\"Unsupported helm-docs environment: {}\". format(env))", "def setup_kubeflow_ks_app(dir, namespace, github_token, api_client):\n util.makedirs(dir)\n\n logging.info(\"Using test directory: %s\", dir)\n\n namespace_name = namespace\n\n namespace = _setup_test(api_client, namespace_name)\n logging.info(\"Using namespace: %s\", namespace)\n if github_token:\n logging.info(\"Setting GITHUB_TOKEN to %s.\", github_token)\n # Set a GITHUB_TOKEN so that we don't rate limited by GitHub;\n # see: https://github.com/ksonnet/ksonnet/issues/233\n os.environ[\"GITHUB_TOKEN\"] = github_token\n\n if not os.getenv(\"GITHUB_TOKEN\"):\n logging.warning(\"GITHUB_TOKEN not set; you will probably hit Github API \"\n \"limits.\")\n # Initialize a ksonnet app.\n app_name = \"kubeflow-test-\" + uuid.uuid4().hex[0:4]\n util.run(\n [\n \"ks\",\n \"init\",\n app_name,\n ], cwd=dir)\n\n app_dir = os.path.join(dir, app_name)\n\n kubeflow_registry = \"github.com/kubeflow/kubeflow/tree/master/kubeflow\"\n util.run(\n [\"ks\", \"registry\", \"add\", \"kubeflow\", kubeflow_registry], cwd=app_dir)\n\n # Install required packages\n packages = [\"kubeflow/core\", \"kubeflow/tf-serving\", \"kubeflow/tf-job\", \"kubeflow/pytorch-job\", \"kubeflow/argo\"]\n\n # Instead of installing packages we edit the app.yaml file directly\n #for p in packages:\n # util.run([\"ks\", \"pkg\", \"install\", p], cwd=app_dir)\n app_file = os.path.join(app_dir,\"app.yaml\")\n with open(app_file) as f:\n app_yaml = yaml.load(f)\n\n libraries = {}\n for pkg in packages:\n pkg = pkg.split(\"/\")[1]\n libraries[pkg] = {'gitVersion':{'commitSha': 'fake', 'refSpec': 'fake'}, 'name': pkg, 'registry': \"kubeflow\"}\n app_yaml['libraries'] = libraries\n\n with open(app_file, \"w\") as f:\n yaml.dump(app_yaml, f)\n\n # Create vendor directory with a symlink to the src\n # so that we use the code at the desired commit.\n target_dir = os.path.join(app_dir, \"vendor\", \"kubeflow\")\n\n REPO_ORG = \"kubeflow\"\n REPO_NAME = \"kubeflow\"\n REGISTRY_PATH = \"kubeflow\"\n source = os.path.join(dir, \"src\", REPO_ORG, REPO_NAME,\n REGISTRY_PATH)\n logging.info(\"Creating link %s -> %s\", target_dir, source)\n os.symlink(source, target_dir)\n\n return app_dir", "def add_node_after_upgrade_neutron_ceph(self):\n self.env.revert_snapshot('upgrade_master_neutron_ceph')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[6:7])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-07': ['controller']}\n )\n\n self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,\n progress=60)\n self.fuel_web.stop_deployment_wait(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:7],\n timeout=8 * 60\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 
'sanity', 'smoke'])", "def install_kubectl(\n cmd, client_version=\"latest\", install_location=None, source_url=None\n):\n\n if not source_url:\n source_url = \"https://storage.googleapis.com/kubernetes-release/release\"\n cloud_name = cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurechinacloud\":\n source_url = \"https://mirror.azure.cn/kubernetes/kubectl\"\n\n if client_version == \"latest\":\n context = _ssl_context()\n version = urlopen(source_url + \"/stable.txt\", context=context).read()\n client_version = version.decode(\"UTF-8\").strip()\n else:\n client_version = \"v%s\" % client_version\n\n file_url = \"\"\n system = platform.system()\n base_url = source_url + \"/{}/bin/{}/amd64/{}\"\n\n # ensure installation directory exists\n if install_location is None:\n install_location = _get_default_install_location(\"kubectl\")\n install_dir, cli = os.path.dirname(install_location), os.path.basename(\n install_location\n )\n if not os.path.exists(install_dir):\n os.makedirs(install_dir)\n\n if system == \"Windows\":\n file_url = base_url.format(client_version, \"windows\", \"kubectl.exe\")\n elif system == \"Linux\":\n # TODO: Support ARM CPU here\n file_url = base_url.format(client_version, \"linux\", \"kubectl\")\n elif system == \"Darwin\":\n file_url = base_url.format(client_version, \"darwin\", \"kubectl\")\n else:\n raise CLIError(\n \"Proxy server ({}) does not exist on the cluster.\".format(system)\n )\n\n logger.warning('Downloading client to \"%s\" from \"%s\"', install_location, file_url)\n try:\n _urlretrieve(file_url, install_location)\n os.chmod(\n install_location,\n os.stat(install_location).st_mode\n | stat.S_IXUSR\n | stat.S_IXGRP\n | stat.S_IXOTH,\n )\n except IOError as ex:\n raise CLIError(\n \"Connection error while attempting to download client ({})\".format(ex)\n )\n\n if (\n system == \"Windows\"\n ): # be verbose, as the install_location likely not in Windows's search PATHs\n env_paths = os.environ[\"PATH\"].split(\";\")\n found = next(\n (x for x in env_paths if x.lower().rstrip(\"\\\\\") == install_dir.lower()),\n None,\n )\n if not found:\n # pylint: disable=logging-format-interpolation\n logger.warning(\n 'Please add \"{0}\" to your search PATH so the `{1}` can be found. 2 options: \\n'\n ' 1. Run \"set PATH=%PATH%;{0}\" or \"$env:path += \\'{0}\\'\" for PowerShell. '\n \"This is good for the current command session.\\n\"\n \" 2. Update system PATH environment variable by following \"\n '\"Control Panel->System->Advanced->Environment Variables\", and re-open the command window. 
'\n \"You only need to do it once\".format(install_dir, cli)\n )\n else:\n logger.warning(\n \"Please ensure that %s is in your search PATH, so the `%s` command can be found.\",\n install_dir,\n cli,\n )", "def add_label(self, repo, manifestref, data):\n url = self._manifest_url(repo, manifestref, '')\n response = self.rest.post(url, data)\n\n if response.status_code is not 201:\n self.module.fail_json(msg=response.json)\n return response.info", "def eks(ctx):\n pass", "def install(self, req, cluster_id):\n\n #instl.pxe_server_build(req, install_meta)\n # get hosts config which need to install OS\n #hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id)\n # if have hosts need to install os, ZENIC installataion executed in OSInstallTask\n #if hosts_need_os:\n #os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os)\n #os_install_thread = Thread(target=os_install_obj.run)\n #os_install_thread.start()\n #else:\n LOG.info(_(\"No host need to install os, begin install ZENIC for cluster %s.\" % cluster_id))\n zenic_install_task = instl.ZENICInstallTask(req, cluster_id)\n zenic_install_task.start()\n\t\t\n LOG.info((_(\"begin install zenic, please waiting....\")))\n time.sleep(5)\n LOG.info((_(\"install zenic successfully\")))", "def _check_helmrepository_creation(self, namespace, name):\n kube_operator = kubernetes.KubeOperator()\n helmrepo = kube_operator.get_custom_resource(\n constants.FLUXCD_CRD_HELM_REPO_GROUP,\n constants.FLUXCD_CRD_HELM_REPO_VERSION,\n namespace,\n constants.FLUXCD_CRD_HELM_REPO_PLURAL,\n name\n )\n if helmrepo is None:\n msg = \"HelmRepository %s on namespace %s: creation timeout\" \\\n % (namespace, name)\n LOG.error(msg)\n raise exception.SysinvException(_(msg))", "def deploy_odf_addon(self):\n logger.info(\"Deploying odf with ocs addon.\")\n clustername = config.ENV_DATA.get(\"cluster_name\")\n ocs_version = version.get_semantic_ocs_version_from_config()\n cmd = (\n f\"ibmcloud ks cluster addon enable openshift-data-foundation --cluster {clustername} -f --version \"\n f\"{ocs_version}.0\"\n )\n run_ibmcloud_cmd(cmd)\n time.sleep(120)\n logger.info(\"Ocs addon started enabling.\")", "def add_node_after_upgrade_nova_cinder(self):\n self.env.revert_snapshot('upgrade_master_nova_cinder')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:4])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['controller']}\n )\n\n self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,\n progress=60)\n self.fuel_web.stop_deployment_wait(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:4],\n timeout=8 * 60\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_node_after_upgrade_nova_cinder')", "def start(args):\n\n # pylint: disable=too-many-locals\n # Pylint doesn't want more than 15 local variables in a function; this one has 17. 
This is about\n # as low as I want to go because, while I can cheat and stuff unrelated things in a dictionary,\n # that won't improve readability.\n\n uuid = str(uuid4())\n container_cluster_config_dir = join(CLUSTERDOCK_VOLUME, uuid, 'config')\n makedirs(container_cluster_config_dir)\n\n for mount in client.inspect_container(get_clusterdock_container_id())['Mounts']:\n if mount['Destination'] == CLUSTERDOCK_VOLUME:\n host_cluster_config_dir = join(mount['Source'], uuid, 'config')\n break\n else:\n raise Exception(\"Could not find source of {0} mount.\".format(CLUSTERDOCK_VOLUME))\n\n # CLUSTERDOCK_VOLUME/uuid/config in the clusterdock container corresponds to\n # host_cluster_config_dir on the Docker host.\n logger.debug(\"Creating directory for cluster configuration files in %s...\",\n host_cluster_config_dir)\n\n # Generate the image name to use from the command line arguments passed in.\n image = '/'.join(\n [item\n for item in [args.registry_url, args.namespace or DEFAULT_APACHE_NAMESPACE,\n \"clusterdock:{os}_java-{java}_hadoop-{hadoop}_hbase-{hbase}\".format(\n os=args.operating_system, java=args.java_version,\n hadoop=args.hadoop_version, hbase=args.hbase_version\n )]\n if item]\n )\n if args.always_pull or not is_image_available_locally(image):\n pull_image(image)\n\n # Before starting the cluster, we create a throwaway container from which we copy\n # configuration files back to the host. We also use this container to run an HBase\n # command that returns the port of the HBase master web UI. Since we aren't running init here,\n # we also have to manually pass in JAVA_HOME as an environmental variable.\n get_hbase_web_ui_port_command = ('/hbase/bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool '\n 'hbase.master.info.port')\n container_id = client.create_container(image=image, command=get_hbase_web_ui_port_command,\n environment={'JAVA_HOME': '/java'})['Id']\n logger.debug(\"Created temporary container (id: %s) from which to copy configuration files.\",\n container_id)\n\n # Actually do the copying of Hadoop configs...\n _copy_container_folder_to_host(container_id, '/hadoop/etc/hadoop',\n join(container_cluster_config_dir, 'hadoop'),\n join(host_cluster_config_dir, 'hadoop'))\n\n # ... and repeat for HBase configs.\n _copy_container_folder_to_host(container_id, '/hbase/conf',\n join(container_cluster_config_dir, 'hbase'),\n join(host_cluster_config_dir, 'hbase'))\n\n logger.info(\"The /hbase/lib folder on containers in the cluster will be volume mounted \"\n \"into %s...\", join(host_cluster_config_dir, 'hbase-lib'))\n _copy_container_folder_to_host(container_id, '/hbase/lib',\n join(container_cluster_config_dir, 'hbase-lib'),\n join(host_cluster_config_dir, 'hbase-lib'))\n\n # Every node in the cluster will have a shared volume mount from the host for Hadoop and HBase\n # configuration files as well as the HBase lib folder.\n shared_volumes = [{join(host_cluster_config_dir, 'hadoop'): '/hadoop/etc/hadoop'},\n {join(host_cluster_config_dir, 'hbase'): '/hbase/conf'},\n {join(host_cluster_config_dir, 'hbase-lib'): '/hbase/lib'}]\n\n # Get the HBase master web UI port, stripping the newline the Docker REST API gives us.\n client.start(container=container_id)\n if client.wait(container=container_id) == EX_OK:\n hbase_master_web_ui_port = client.logs(container=container_id).rstrip()\n client.remove_container(container=container_id, force=True)\n else:\n raise Exception('Failed to remove HBase configuration container.')\n\n # Create the Node objects. 
These hold the state of our container nodes and will be started\n # at Cluster instantiation time.\n primary_node = Node(hostname=args.primary_node[0], network=args.network,\n image=image, ports=[NAMENODE_WEB_UI_PORT,\n hbase_master_web_ui_port,\n RESOURCEMANAGER_WEB_UI_PORT,\n HBASE_REST_SERVER_PORT],\n volumes=shared_volumes)\n secondary_nodes = []\n for hostname in args.secondary_nodes:\n # A list of service directories will be used to name folders on the host and, appended\n # with an index, in the container, as well (e.g. /data1/node-1/dfs:/dfs1).\n service_directories = ['dfs', 'yarn']\n\n # Every Node will have shared_volumes to let one set of configs on the host be propagated\n # to every container. If --data-directories is specified, this will be appended to allow\n # containers to use multiple disks on the host.\n volumes = shared_volumes[:]\n if args.data_directories:\n data_directories = args.data_directories.split(',')\n volumes += [{join(data_directory, uuid, hostname, service_directory):\n \"/{0}{1}\".format(service_directory, i)}\n for i, data_directory in enumerate(data_directories, start=1)\n for service_directory in service_directories]\n secondary_nodes.append(Node(hostname=hostname,\n network=args.network,\n image=image,\n volumes=volumes))\n\n Cluster(topology='apache_hbase',\n node_groups=[NodeGroup(name='primary', nodes=[primary_node]),\n NodeGroup(name='secondary', nodes=secondary_nodes)],\n network_name=args.network).start()\n\n # When creating configs, pass in a dictionary of wildcards into create_configurations_from_file\n # to transform placeholders in the configurations.cfg file into real values.\n _create_configs_from_file(filename=args.configurations,\n cluster_config_dir=container_cluster_config_dir,\n wildcards={\"primary_node\": args.primary_node,\n \"secondary_nodes\": args.secondary_nodes,\n \"all_nodes\": args.primary_node + args.secondary_nodes,\n \"network\": args.network})\n\n # After creating configurations from the configurations.cfg file, update hdfs-site.xml and\n # yarn-site.xml to use the data directories passed on the command line.\n if args.data_directories:\n _update_config_for_data_dirs(\n container_cluster_config_dir=container_cluster_config_dir,\n data_directories=data_directories\n )\n\n if not args.dont_start_services:\n _start_services(primary_node, hbase_master_web_ui_port=hbase_master_web_ui_port)", "def _setup_latest_repo(ctx, config):\n with parallel():\n for remote in ctx.cluster.remotes.keys():\n if remote.os.package_type == 'rpm':\n # pre-cleanup\n remote.run(args=['sudo', 'rm', run.Raw('/etc/yum.repos.d/rh*')],\n check_status=False)\n remote.run(args=['sudo', 'yum', 'clean', 'metadata'])\n if not remote.os.version.startswith('8'):\n remote.run(args=['sudo', 'yum', 'update', 'metadata'])\n # skip is required for beta iso testing\n if config.get('skip-subscription-manager', False) is True:\n log.info(\"Skipping subscription-manager command\")\n else:\n remote.run(args=['sudo', 'subscription-manager', 'repos',\n run.Raw('--disable=*ceph*')],\n check_status=False\n )\n base_url = config.get('base-repo-url', '')\n installer_url = config.get('installer-repo-url', '')\n repos = ['MON', 'OSD', 'Tools', 'Calamari', 'Installer']\n installer_repos = ['Agent', 'Main', 'Installer']\n if config.get('base-rh-repos'):\n repos = ctx.config.get('base-rh-repos')\n if config.get('installer-repos'):\n installer_repos = ctx.config.get('installer-repos')\n # create base repo\n if base_url.startswith('http'):\n repo_to_use = 
_get_repos_to_use(base_url, repos)\n base_repo_file = NamedTemporaryFile(mode='w', delete=False)\n _create_temp_repo_file(repo_to_use, base_repo_file)\n remote.put_file(base_repo_file.name, base_repo_file.name)\n remote.run(args=['sudo', 'cp', base_repo_file.name,\n '/etc/yum.repos.d/rh_ceph.repo'])\n remote.run(args=['sudo', 'yum', 'clean', 'metadata'])\n if installer_url.startswith('http'):\n irepo_to_use = _get_repos_to_use(\n installer_url, installer_repos)\n installer_file = NamedTemporaryFile(delete=False)\n _create_temp_repo_file(irepo_to_use, installer_file)\n remote.put_file(installer_file.name, installer_file.name)\n remote.run(args=['sudo', 'cp', installer_file.name,\n '/etc/yum.repos.d/rh_inst.repo'])\n remote.run(args=['sudo', 'yum', 'clean', 'metadata'])\n if not remote.os.version.startswith('8'):\n remote.run(args=['sudo', 'yum', 'update', 'metadata'])\n else:\n if config.get('deb-repo-url'):\n deb_repo = config.get('deb-repo-url')\n deb_gpg_key = config.get('deb-gpg-key', None)\n set_deb_repo(remote, deb_repo, deb_gpg_key)", "def main(kdds_fname, numberOfGCL):\n\n cfg = read_config(\"config_cluster.json\")\n\n CID = cfg[\"CID\"]\n ZID = cfg[\"ZID\"]\n mtype = cfg[\"machine-type\"]\n\n docker = cfg[\"docker\"]\n gcr = cfg[\"gcr\"]\n project = cfg[\"project\"]\n\n print(\"From config_cluster.json:\")\n print(CID,ZID,mtype,docker,gcr,project)\n\n print(\"Reading KDDs list from {0}\".format(kdds_fname))\n\n Kdds = ReadKddsToBeCalculated(kdds_fname)\n\n print(\"To compute KDDs: {0}\".format(len(Kdds)))\n\n print(\"Making cluster with nodes: {0}\".format(numberOfGCL))\n\n rc = make_cluster(CID, mtype, numberOfGCL, ZID)\n if rc != 0:\n print(\"Cannot make cluster\")\n sys.exit(1)\n\n rc = auth_cluster(CID, ZID)\n if rc != 0:\n print(\"Cannot make auth\")\n sys.exit(1)\n\n docker2run = os.path.join(gcr, project, docker) # full path to docker\n\n for kdd in Kdds:\n pod_name = make_json_pod(\"tempod.json\", kdd, docker2run)\n cmd = \"kubectl create -f \" + pod_name\n rc = 0\n for k in range(0, 12): # several attempts to make a pod\n rc = subprocess.call(cmd, shell=True)\n if rc == 0:\n time.sleep(0.5)\n break\n\n if rc != 0:\n print(\"Cannot make kdd {0}\".format(kdd))\n sys.exit(1)", "def test_07_deploy_kubernetes_ha_cluster(self):\n if self.setup_failed == True:\n self.fail(\"Setup incomplete\")\n if self.default_network:\n self.skipTest(\"HA cluster on shared network requires external ip address, skipping it\")\n global k8s_cluster\n k8s_cluster = self.getValidKubernetesCluster(1, 2)\n self.debug(\"HA Kubernetes cluster with ID: %s successfully deployed\" % k8s_cluster.id)\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes statistical confidence interval of the results from standard deviation and number of iterations
def _confidence_interval(self, std): return 1.96 * std / np.sqrt(self.n_iter)
[ "def byConfidenceInterval(self) -> ConfidenceIntervalResults:\n global_mean: Rational = Moment.mean(self.data)\n\n upper, lower = ops.splitList(self.data.data, lambda obs: obs <= global_mean)\n upper_std_dev: Rational = Moment.std_dev(Vector(upper))\n lower_std_dev: Rational = Moment.std_dev(Vector(lower))\n np_upper = np.std(upper)\n np_lower = np.std(lower)\n\n upper_outliers, upper_data = ops.splitList(upper, lambda obs: obs <= global_mean + upper_std_dev)\n lower_outliers, lower_data = ops.splitList(lower, lambda obs: obs >= global_mean - lower_std_dev)\n\n return ConfidenceIntervalResults(\n global_mean,\n upper_std_dev,\n lower_std_dev,\n upper_data + lower_data, \n Vector(lower_outliers).sort().data, \n Vector(upper_outliers).sort().data\n )", "def boot_stdev_ci(data,conf,nboots=1000):\n\n ptilemin = (100.-conf)/2.\n ptilemax = conf + (100-conf)/2.\n\n samplesize = data.size\n ranu = np.random.uniform(0,samplesize,nboots*samplesize)\n ranu = np.floor(ranu).astype(int)\n\n bootdat = np.array(data[ranu])\n bootdat = bootdat.reshape([samplesize,nboots])\n\n bootstdev = np.std(bootdat, axis=0)\n minci = np.percentile(bootstdev, ptilemin)\n maxci = np.percentile(bootstdev, ptilemax)\n\n return minci, maxci", "def _confidence_for_samples(self, distribution):\n self.conf_interval_low = self.mean - (distribution * self.std_dev / math.sqrt(self.num_samples))\n self.conf_interval_high = self.mean + (distribution * self.std_dev / math.sqrt(self.num_samples))\n \n self.conf_interval_size_abs = (self.conf_interval_high\n - self.conf_interval_low)\n self.conf_interval_size = self.conf_interval_size_abs / self.mean", "def bootstrap_confidence_interval(sample, stat_function=np.mean, resamples=1000, ci=95):\n boostrap_samples = bootstrap(sample, resamples=resamples)\n bootstrap_samples_stat = list(map(stat_function, boostrap_samples))\n low_bound = (100. - ci) / 2\n high_bound = 100. 
- low_bound\n lower_ci, upper_ci = np.percentile(bootstrap_samples_stat,\n [low_bound, high_bound])\n return lower_ci, upper_ci #, bootstrap_samples_stat", "def boostrapping_confidence_interval(pred_all, gs_all, eva_func, ci):\n import numpy as np\n import random\n # set random seed\n random.seed(0)\n\n # prediction-groundtruth pairs from all five fold cross validation\n tmp = np.array([pred_all, gs_all]).T\n # calculate overall correlation\n mb = eva_func(tmp[:,0], tmp[:,1])\n # start boostrapping ...\n eva_all = []\n for i in range(100):\n tmp_new = random.choices(tmp, k = len(tmp))\n tmp_new = np.array(tmp_new)\n eva = eva_func(tmp_new[:,0], tmp_new[:,1])\n eva_all.append(eva)\n eva_all = sorted(eva_all)\n #print(eva_all)\n lb = eva_all[round(100*(0.5-ci*0.5))]\n ub = eva_all[round(100*(0.5+ci*0.5))]\n return mb, lb, ub", "def _compute_bootstrapped_statistics(\n measured_values,\n measured_stds,\n estimated_values,\n estimated_stds,\n statistics=None,\n percentile=0.95,\n bootstrap_iterations=1000,\n):\n\n sample_count = len(measured_values)\n\n # Compute the mean of the statistics.\n mean_statistics, statistics_labels = _compute_statistics(\n measured_values, estimated_values, statistics\n )\n\n # Generate the bootstrapped statistics samples.\n sample_statistics = numpy.zeros((bootstrap_iterations, len(mean_statistics)))\n\n for sample_index in range(bootstrap_iterations):\n\n samples_indices = numpy.random.randint(\n low=0, high=sample_count, size=sample_count\n )\n\n sample_measured_values = measured_values[samples_indices]\n\n if measured_stds is not None:\n sample_measured_values += numpy.random.normal(0.0, measured_stds)\n\n sample_estimated_values = estimated_values[samples_indices]\n\n if estimated_stds is not None:\n sample_estimated_values += numpy.random.normal(0.0, estimated_stds)\n\n sample_statistics[sample_index], _ = _compute_statistics(\n sample_measured_values, sample_estimated_values, statistics\n )\n\n # Compute the SEM\n standard_errors_array = numpy.std(sample_statistics, axis=0)\n\n # Store the means and SEMs in dictionaries\n means = dict()\n standard_errors = dict()\n\n for statistic_index in range(len(mean_statistics)):\n statistic_label = statistics_labels[statistic_index]\n\n means[statistic_label] = mean_statistics[statistic_index]\n standard_errors[statistic_label] = standard_errors_array[statistic_index]\n\n # Compute the confidence intervals.\n lower_percentile_index = int(bootstrap_iterations * (1 - percentile) / 2)\n upper_percentile_index = int(bootstrap_iterations * (1 + percentile) / 2)\n\n confidence_intervals = dict()\n\n for statistic_index in range(len(mean_statistics)):\n statistic_label = statistics_labels[statistic_index]\n\n sorted_samples = numpy.sort(sample_statistics[:, statistic_index])\n\n confidence_intervals[statistic_label] = (\n sorted_samples[lower_percentile_index],\n sorted_samples[upper_percentile_index],\n )\n\n return means, standard_errors, confidence_intervals", "def confidence_interval(res: OptimizeResult, **kwargs):\n if not isinstance(res, OptimizeResult):\n raise ValueError('Argument \\'res\\' should be an instance of \\'scipy.optimize.OptimizeResult\\'')\n\n confidence = kwargs.get('confidence', 0.95)\n\n # The vector of residuals at the solution\n residuals = res.fun\n # The number of data points\n n = len(residuals)\n # The number of parameters\n p = len(res.x)\n # The degrees of freedom\n dfe = n - p\n # Get MSE. 
The degrees of freedom when J is full rank is v = n-p and n-rank(J) otherwise\n mse = (LA.norm(residuals)) ** 2 / dfe\n\n # Needs to estimate the jacobian at the predictor point!!!\n # ypred = func(x,res.x)\n # delta = np.zeros((len(ypred),p));\n # fdiffstep = np.amax(np.spacing(res.x)**(1/3));\n # for i in range(p):\n # change = np.zeros(p)\n # if res.x[i] == 0:\n # nb = np.sqrt(LA.norm(res.x))\n # change[i] = fdiffstep * (nb + (nb == 0))\n # else:\n # change[i] = fdiffstep * res.x[i]\n #\n # predplus = func(x,res.x+change)\n # delta[:,i] = (predplus - ypred)/change[i]\n\n # Find R to get the variance\n _, R = LA.qr(res.jac)\n # Get the rank of jac_pnp\n Rinv = LA.pinv(R)\n\n v = np.sum(Rinv ** 2, axis=1) * mse\n alpha = 1.0 - confidence\n tval = t.ppf(1.0 - alpha / 2.0, dfe)\n delta = np.sqrt(v) * tval\n ci = np.zeros((p, 2), dtype=np.float64)\n\n for i, p, d in zip(range(n), res.x, delta):\n ci[i, :] = [p - d, p + d]\n\n return ci", "def confidence_intervals(self, \n x,\n ci=0.95,\n n=1000):\n\n # Sample from the predictive distribution\n pred_dist = self.predictive_distribution(x, n=n)\n\n # TODO: assumes y is scalar, add a check for that\n\n # Compute percentiles of the predictive distribution\n lb = 100*(1.0-ci)/2.0\n q = [lb, 100.0-lb]\n prcs = np.percentile(pred_dist, q, axis=0)\n return prcs[0, :], prcs[1, :]", "def confidenceIntervalSize(stdev, nbsamples):\n # CHECKME: for better precision, maybe get the percentile dynamically, from the scipy library?\n return 2 * 1.98 * stdev / sqrt(nbsamples)", "def bootstrap(dataset, confidence=0.95, iterations=10000, sample_size=None, statistic=np.median):\n stats = list()\n if not sample_size:\n sample_size = 1 / np.sqrt(len(dataset))\n n_size = int(len(dataset) * sample_size)\n\n for _ in range(iterations):\n # Sample (with replacement) from the given dataset\n sample = resample(dataset, n_samples=n_size, replace=True)\n # Calculate user-defined statistic and store it\n stat = statistic(sample)\n stats.append(stat)\n\n # Sort the array of per-sample statistics and cut off ends\n ostats = sorted(stats)\n lval = np.nanpercentile(ostats, ((1 - confidence) / 2) * 100)\n uval = np.nanpercentile(ostats, (confidence + ((1 - confidence) / 2)) * 100)\n\n return lval, uval", "def get_confidence_interval_bootsrap(array, central_val_estimator, confidence=0.95, n_boots=100):\n\n # get an idea of all estimates\n n_size = len(array) - 1\n all_estimates = [central_val_estimator(resample(array, n_samples=n_size)) for i in range(n_boots)]\n\n # get CI\n tail_p = ((1-confidence)/2)*100\n return (np.percentile(all_estimates, tail_p), np.percentile(all_estimates, 100-tail_p))", "def _mean_confidence_interval(data, confidence=0.95):\n\n n = len(data)\n m = mean(data)\n std_err = sem(data)\n h = std_err * t.ppf((1 + confidence) / 2, n - 1)\n return m, m-h, m+h", "def _calculate_confidence_interval(ci_value, sol_df, CI_distribution):\n n = len(sol_df)\n\n if CI_distribution not in [\"t\", \"z\"]:\n warn(\"Unrecognized `CI_distribution`, defaulting to t-distribution\")\n CI_distribution = \"t\"\n\n options = {}\n if CI_distribution.startswith(\"z\"):\n interval_func = stats.norm.interval\n ddof = 0\n else:\n interval_func = stats.t.interval\n ddof = 1\n options[\"df\"] = n - ddof\n\n mean = sol_df.mean(axis=0)\n options[\"scale\"] = sol_df.std(axis=0, ddof=ddof) / np.sqrt(n)\n options[\"loc\"] = mean\n\n lower_bound, upper_bound = interval_func(ci_value, **options)\n\n return lower_bound, upper_bound, mean", "def confidence_interval(df,param,coeff=2.42):\n \n 
df2=df.copy()\n\n df_stats=df2[param].describe().T\n stats=df_stats[['count','mean','std']]\n\n stats\n ci95_hi=stats['mean'] + coeff*stats['std']/math.sqrt(stats['count'])\n ci95_lo=stats['mean'] - coeff*stats['std']/math.sqrt(stats['count'])\n df6=df2.loc[(df2[param]>=ci95_lo)&(df2[param]<=ci95_hi)]\n return df6", "def _se_mean_pooled(self):\n ssw_base_ratios = self._sum_sq_w(base_ratio=True)\n enum = np.nan_to_num((self.sd ** 2) * (self.cbases-1))\n denom = self.cbases-ssw_base_ratios\n\n enum_pairs = np.array([enum1 + enum2\n for enum1, enum2\n in combinations(enum[0], 2)])\n denom_pairs = np.array([denom1 + denom2\n for denom1, denom2\n in combinations(denom[0], 2)])\n\n ebases_correc_pairs = np.array([1/x + 1/y\n for x, y\n in combinations(self.ebases[0], 2)])\n\n if self.y_is_multi and self.parameters['ovlp_correc']:\n ovlp_correc_pairs = ((2*self.overlap) /\n [x * y for x, y\n in combinations(self.ebases[0], 2)])\n else:\n ovlp_correc_pairs = self.overlap\n\n return (np.sqrt((enum_pairs/denom_pairs) *\n (ebases_correc_pairs - ovlp_correc_pairs)))", "def credible_interval(mean, variance, std_multiplier):\n upper_ci = [m + std_multiplier*np.sqrt(v) for m, v in zip(mean, variance)]\n lower_ci = [m - std_multiplier*np.sqrt(v) for m, v in zip(mean, variance) ]\n\n return lower_ci, upper_ci", "def risk_ci(events, total, alpha=0.05, confint='wald'):\n risk = events / total\n c = 1 - alpha / 2\n zalpha = norm.ppf(c, loc=0, scale=1)\n if confint == 'wald':\n lr = math.log(risk / (1 - risk))\n sd = math.sqrt((1 / events) + (1 / (total - events)))\n lower = 1 / (1 + math.exp(-1 * (lr - zalpha * sd)))\n upper = 1 / (1 + math.exp(-1 * (lr + zalpha * sd)))\n elif confint == 'hypergeometric':\n sd = math.sqrt(events * (total - events) / (total ** 2 * (total - 1)))\n lower = risk - zalpha * sd\n upper = risk + zalpha * sd\n else:\n raise ValueError('Only wald and hypergeometric confidence intervals are currently supported')\n return risk, lower, upper, sd", "def OLS_stat():\n N = [100, 1000] # Number of data points\n sigma2 = [0.01, 1] # Irreducable error\n\n # Initialize model\n model_ols = OLS()\n poly_deg = 5 # complexity\n p = 0.9 # 90% confidence interval\n\n # Dataframe for storing results\n df = pd.DataFrame(columns=['N', '$\\sigma^2$', 'MSE', '$R^2$'])\n\n # Setup for plotting\n labels = generate_labels(poly_deg)\n cmap = plt.get_cmap(\"Greens\")\n\n for n in N:\n for s2 in sigma2:\n x = np.random.uniform(0, 1, (n, 2))\n noise = np.random.normal(0, s2, n)\n z = frankeFunction(x[:, 0], x[:, 1]) + noise\n model_ols.fit(x, z, poly_deg)\n\n mse = model_ols.mse(x, z)\n r2 = model_ols.r2(x, z)\n df = df.append({'N': n, '$\\\\sigma^2$': s2, 'MSE': mse,\n '$R^2$': r2}, ignore_index=True)\n\n CI = model_ols.confidence_interval(p)\n norm = matplotlib.colors.Normalize(vmin=-10, vmax=len(CI))\n\n fig = plt.figure(figsize=(8, 6))\n plt.yticks(np.arange(model_ols.params), labels)\n plt.grid()\n\n for i in range(len(CI)):\n plt.plot(CI[i], (i, i), color=cmap(norm(i)))\n plt.plot(CI[i], (i, i), \"o\", color=cmap(norm(i)))\n\n plt.gca().set_title(\"90% Confidence Interval\")\n textstr = '\\n'.join((\n \"$N = {}$\".format(n),\n \"$\\\\sigma^2 = {}$\".format(s2)))\n props = dict(boxstyle='round', facecolor='lightblue', alpha=0.5)\n plt.gca().text(0.83, 0.95, textstr, transform=plt.gca().transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n text_s2 = str(s2).replace(\".\", \"_\")\n fig.savefig(fig_path(\"conf_{}_{}.pdf\".format(n, text_s2)))\n\n # Render dataframe to a LaTeX tabular environment 
table and write to file\n pd.options.display.float_format = '{:,.3f}'.format\n df = df.apply(lambda x: x.astype(\n int) if np.allclose(x, x.astype(int)) else x)\n pd.options.display.latex.escape = False\n latex = df.to_latex(index=False, column_format='cccc')\n latex = latex.replace('\\\\toprule', '\\\\hline \\\\hline')\n latex = latex.replace('\\\\midrule', '\\\\hline \\\\hline')\n latex = latex.replace('\\\\bottomrule', '\\\\hline \\\\hline')\n\n with open(tab_path('ols_stat.tex'), 'w') as f:\n f.write(latex)", "def getAgeStddev(self, recogniser_csv_file, initial_recognition_file):\n df_final = pandas.read_csv(recogniser_csv_file, dtype={\"I\": object}, usecols =[\"I\", \"A\", \"R\", \"N\"], converters={\"A\": ast.literal_eval})\n df_init = pandas.read_csv(initial_recognition_file, usecols =[\"I_est\", \"A\", \"N\"], converters={\"A\": ast.literal_eval})\n \n recogs_list = df_final.values.tolist()\n count_recogs = 0\n stddev_true_mean = [0.0 for i in range(1, len(self.i_labels))]\n stddev_est_list = [0.0 for i in range(1, len(self.i_labels))] \n avg_val = [0.0 for i in range(1, len(self.i_labels))]\n estimates_mean = [[] for i in range(1, len(self.i_labels))]\n estimates_stddev = [[] for i in range(1, len(self.i_labels))]\n while count_recogs < len(recogs_list):\n isRegistered = not recogs_list[count_recogs][2]# False if register button is pressed (i.e. if the person starts the session for the first time)\n numRecognition = recogs_list[count_recogs][3]\n p_id = recogs_list[count_recogs][0]\n p_id_index = self.i_labels.index(p_id)\n\n if isRegistered:\n if self.isMultipleRecognitions:\n num_mult_recognitions = df_final.loc[df_final['N'] == numRecognition].A.count()\n\n for num_recog in range(0, num_mult_recognitions): \n est_mean = recogs_list[count_recogs][1][0]\n est_conf = recogs_list[count_recogs][1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_mean[p_id_index-1].append(est_mean)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n if num_recog < num_mult_recognitions - 1:\n count_recogs += 1\n else:\n est_mean = recogs_list[count_recogs][1][0]\n est_conf = recogs_list[count_recogs][1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_mean[p_id_index-1].append(est_mean)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n \n else:\n if self.isMultipleRecognitions:\n \n init_recog_est = df_init.loc[df_init['N'] == numRecognition].values.tolist()\n num_mult_recognitions = len(init_recog_est)\n for num_recog in range(0, num_mult_recognitions):\n est_mean = init_recog_est[num_recog][1][0]\n est_conf = init_recog_est[num_recog][1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_mean[p_id_index-1].append(est_mean)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n \n num_mult_recognitions = df_final.loc[df_final['N'] == numRecognition].A.count()\n for num_recog in range(0, 
num_mult_recognitions):\n est_mean = recogs_list[count_recogs][1][0]\n est_conf = recogs_list[count_recogs][1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_mean[p_id_index-1].append(est_mean)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n if num_recog < num_mult_recognitions - 1:\n count_recogs += 1\n\n else:\n\n init_recog_est = df_init.loc[df_init['N'] == numRecognition].values.tolist()\n est_mean = init_recog_est[1][0]\n est_conf = init_recog_est[1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_mean[p_id_index-1].append(est_mean)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n \n est_mean = recogs_list[count_recogs][1][0]\n est_conf = recogs_list[count_recogs][1][1]\n if est_conf > 0: \n if est_conf == 1.0:\n est_stddev = 0.0\n else:\n est_stddev = 0.5/self.normppf(est_conf + (1-est_conf)/2.0)\n estimates_stddev[p_id_index-1].append(est_stddev)\n stddev_true_mean[p_id_index-1] += math.pow(est_mean - self.ages[p_id_index], 2) + math.pow(est_stddev,2)\n avg_val[p_id_index-1] += est_mean\n count_recogs += 1\n \n for counter in range(0, len(estimates_mean)):\n if len(estimates_mean[counter]) > 0:\n avg_val[counter] /= len(estimates_mean[counter])\n stddev_true_mean[counter] = math.sqrt(stddev_true_mean[counter]/len(estimates_mean[counter])) \n for count_val in range(0, len(estimates_mean[counter])):\n stddev_est_list[counter] += math.pow(estimates_mean[counter][count_val] - avg_val[counter], 2) + math.pow(estimates_stddev[counter][count_val],2)\n if len(estimates_mean[counter]) > 1:\n stddev_est_list[counter] = math.sqrt(stddev_est_list[counter]/(len(estimates_mean[counter])-1))\n \n return stddev_true_mean, stddev_est_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the authorship from this reader with a writer, given by name (e.g., text, biorxiv).
def print( # noqa:T202 self, writer: HintOrType["Writer"] = "text", writer_kwargs: OptionalKwargs = None, file=None, **kwargs, ) -> None: from ..writers import writer_resolver _writer = writer_resolver.make(writer, writer_kwargs) if file is None: _writer.print(self.get_authorship(), **kwargs) elif isinstance(file, (str, Path)): with Path(file).expanduser().resolve().open("w") as _file: _writer.print(self.get_authorship(), file=_file, **kwargs) else: _writer.print(self.get_authorship(), file=file, **kwargs)
[ "def author_name(self) -> str:", "def display(self):\r\n\r\n bookinfo = '\"{}, written by {}\"'.format(self.title, self.author)\r\n print bookinfo", "def display_author(self):\n self.screen.blit(self.author, (0, 620))", "def author(cls, author_name: str) -> \"meta\":\n return cls(name=\"author\", content=author_name)", "def author(self, author):\r\n doc.author = author", "def _parse_author(self, name):\n match = Pubspec._AUTHOR_RE.search(name)\n return (match.group(1), match.group(2))", "def print_authors(authors: dict[str, str]) -> None:\n print(rpipes.terminal.move_y(rpipes.terminal.height // 2 - len(authors) // 2), end=\"\")\n\n for author in authors:\n print(rpipes.terminal.move_right(2), end=\"\")\n print(\n rpipes.terminal.link(\n authors[author],\n rpipes.terminal.white_bold\n + author\n + rpipes.terminal.normal\n + \" - \"\n + authors[author],\n )\n ) # Not all terminals support links so it also prints the url next to the author\n\n print(\n rpipes.terminal.move(rpipes.terminal.height - 3, rpipes.terminal.width - 20)\n + f\"Press {rpipes.terminal.white_bold}B{rpipes.terminal.normal} to go back\"\n )\n draw_boundary()", "def author(self) -> Dict[str, str]:\n\n # No display names in DMs\n if isinstance(self.dest, DMChannel):\n name = self.bot.user.name\n else:\n name = self.ctx.guild.me.display_name\n\n author = {\n 'name': f'{name} Help Manual',\n 'icon_url': self.avatar\n }\n return author", "def add_author(self, author):\n\t\twith open(self.file_path, mode=\"r\", encoding='utf-8') as file:\n\t\t\tfile_content = file.readlines()\n\n\n\t\ttry:\n\t\t\tif not file_content[0].startswith(\"*Author :\"):\n\t\t\t\tfile_content.insert(0, f\"*Author : {author}\\n\")\n\t\t\telse:\n\t\t\t\tdel file_content[0]\n\t\t\t\tfile_content.insert(0, f\"*Author : {author}\\n\")\n\t\texcept:\n\t\t\tfile_content.insert(0, f\"*Author : {author}\\n\")\n\n\t\tself.write(file_content)", "def get_author_name(obj):\n return obj.author.username", "def get_author(self):\n return self.user.first_name +\" \"+ self.user.last_name", "def get_author(self):\n return self.get_abstract_item(\"General\", \"Author\")", "def scrap_authors(self):\n self.orchestrate_master_detail(\n self.AUTHORS_URL_NAME,\n self.AUTHOR_URL_NAME,\n \"author\",\n \"tales\",\n \"Author\",\n \"authors\"\n )", "def print_leader(self):\n print(\"Current leader is\", self.leader_name)", "def addAuthor(self, author):\r\n authorPage=AuthorPage(self.site)\r\n if author in self.metadata['orcids']:\r\n existingauthor = self.authorAlreadyExists(self.metadata['orcids'][author])\r\n if existingauthor == False :\r\n authorPage.setName(author)\r\n authorPage.addOrcid(self.metadata['orcids'][author])\r\n authorPage.setItemType()\r\n else:\r\n authorPage=AuthorPage(self.site, existingauthor)\r\n else:\r\n authorPage.setName(author)\r\n authorPage.setItemType()\r\n #print(\"adding author:\" + author)\r\n self.addDelayedClaim('P2', authorPage)", "def test_citation_has_single_author_with_display_name_no_config(self):\n citation = Citation.objects.create(title='The title',\n type_controlled=Citation.ARTICLE)\n editor = Authority.objects.create(name='Editor', id=\"ED1\", type_controlled=Authority.PERSON)\n relation = ACRelation.objects.create(citation=citation, id=\"ACR1\", authority=editor, type_controlled=ACRelation.EDITOR, name_for_display_in_citation='Some other name')\n expected = \"ACR_ID ACR1 ACRStatus Active ACRType Editor ACRDisplayOrder 1.0 ACRNameForDisplayInCitation Some other name AuthorityID ED1 AuthorityStatus Active AuthorityType Person 
AuthorityName Editor\"\n self.assertEqual(expected, export.citation_editor(citation, []))", "def author(thing, replace_none=True):\n def _author(thing, replace_none):\n author = '[deleted/removed]' if replace_none else None\n if hasattr(thing, 'author') and thing.author:\n author = thing.author.name\n return author\n\n return _network_wrapper(_author, thing, replace_none)", "def group_by_author(self, author):\r\n if isinstance(author, Author):\r\n print(author.get_book_author())", "def validate_writer(credits, author_name_clean): # method to find book author in the tv or movie credits\n\n if len(credits[1]) == 0:\n return False\n else:\n for i in credits[1]: # find book author as writer credit in tv show\n if i[\"department\"] == \"Writing\":\n if i[\"name\"].replace(\" \", \"\") == author_name_clean:\n # print(\"Yes, same writer.\")\n return True\n return False # will return false if author name is never found in credits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the prepared authorship.
def get_authorship(self) -> "Authorship":
    return self.authorship
[ "def get_author(self):\n\t\treturn self._author", "def author_info(self):\n return User.objects.get(pk=self.author)", "def author_info(self):\n return User.objects.get(pk=self.user_id)", "def author(self):\n return self._commit.author", "def author(self) -> SAuthor:\n return self._raw_author", "def get_author(self):\n return self.get_abstract_item(\"General\", \"Author\")", "def get_or_create_author(self):\n if not self.validate():\n return None\n\n return Author.get_or_create(\n name = self.name.data,\n author_type = AuthorType.query.get(self.author_type_id.data),\n gender = Gender.query.get(self.person_gender_id.data) if self.person_gender_id.data else None,\n race = Race.query.get(self.person_race_id.data) if self.person_race_id.data else None)", "def get_person(self):\n return self.authors.all()[0]", "def get_author(self):\n return self.user.first_name +\" \"+ self.user.last_name", "def get_author_fullname(self):\n\n try:\n # Find the author too.\n userlookup = self.transaction[0][\"authorPHID\"]\n who = dict(self.phab.phid.query(\n phids=[userlookup]))[userlookup][\"fullName\"]\n\n self.logger.debug('get_author_fullname: %s' % who)\n return who\n\n # If the object exists, no worries, let's just return a good response.\n except http.client.HTTPException:\n self.logger.info('get_author_fullname is None')\n return None", "def first_author(self):\n if self.first_author_id is None:\n return None\n return Author(author_id=self.first_author_id, authors_fetcher=self._authors_fetcher)", "def author(self) -> Dict[str, str]:\n\n # No display names in DMs\n if isinstance(self.dest, DMChannel):\n name = self.bot.user.name\n else:\n name = self.ctx.guild.me.display_name\n\n author = {\n 'name': f'{name} Help Manual',\n 'icon_url': self.avatar\n }\n return author", "def last_author(self):\n if self.last_author_id is None:\n return None\n return Author(self.last_author_id, self._authors_fetcher)", "def author(thing, replace_none=True):\n def _author(thing, replace_none):\n author = '[deleted/removed]' if replace_none else None\n if hasattr(thing, 'author') and thing.author:\n author = thing.author.name\n return author\n\n return _network_wrapper(_author, thing, replace_none)", "def first_author_id(self):\n if self.author_ids_list is None:\n return None\n return self.author_ids_list[0]", "def get_author_name(obj):\n return obj.author.username", "def get_author(self):\n\t\twith open(self.file_path, \"r\", encoding='utf-8') as file:\n\t\t\ttry:\n\t\t\t\tauthor = file.readline()\n\t\t\t\treturn author[10:-1]\n\t\t\texcept:\n\t\t\t\treturn None", "def author_name(self) -> str:", "def get_author_by_attr():\n dbh = db_handler.DbHandler()\n docs_author = dbh.fetch_author()\n is_id = request.args.get('id')\n is_name = request.args.get('name')\n if is_id:\n for obj in docs_author:\n if obj['_id'] == is_id:\n return jsonify(obj), 200\n abort(404, \"Page Not Found: No such id\")\n if is_name:\n for obj in docs_author:\n if obj['author_name'] == is_name:\n return jsonify(obj), 200\n abort(404, \"Page Not Found: No such name\")\n abort(404, \"Page Not Found: failed get author\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the circular convolution helper functions in Numpy
def test_helpers(self):
    rng = np.random.RandomState(43232)

    dims = 1000
    invert_a = True
    invert_b = False
    x = rng.randn(dims)
    y = rng.randn(dims)
    z0 = circconv(x, y, invert_a=invert_a, invert_b=invert_b)

    dims2 = 2*dims - (2 if dims % 2 == 0 else 1)
    inA = CircularConvolution._input_transform(
        dims, first=True, invert=invert_a)
    inB = CircularConvolution._input_transform(
        dims, first=False, invert=invert_b)
    outC = CircularConvolution._output_transform(dims)

    XY = np.zeros((dims2, 2))
    XY += np.dot(inA.reshape(dims2, 2, dims), x)
    XY += np.dot(inB.reshape(dims2, 2, dims), y)

    C = XY[:, 0] * XY[:, 1]
    z1 = np.dot(outC, C)

    assert_allclose(self, logger, z0, z1)
[ "def test_regressiontest_issue9168():\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]],)\n\n kernel_fwhm = 1*u.arcsec\n pixel_size = 1*u.arcsec\n\n kernel = Gaussian2DKernel(x_stddev=kernel_fwhm/pixel_size)\n\n result = convolve_fft(x, kernel, boundary='fill', fill_value=np.nan,\n preserve_nan=True)\n result = convolve(x, kernel, boundary='fill', fill_value=np.nan,\n preserve_nan=True)", "def circular_conv(signal1, signal2):\n\n if signal1.shape != signal2.shape:\n raise Exception(\"The two signals for circular convolution do not have the same shape\")\n\n signal2_extended = np.concatenate((signal2, signal2, signal2)) # replicate signal at both ends\n\n signal_conv_extended = np.convolve(signal1, signal2_extended, mode=\"same\") # median filtering\n\n signal_conv = signal_conv_extended[len(signal1):2*len(signal1)] # truncate back the signal\n\n return signal_conv", "def test_convolve(self):\n\t\tx = np.array([1.0, 2.0, 3.0])\n\t\th = np.array([0.0, 0.5, 0.5])\n\n\t\tself.assertEqual(signal.convolve(x, h),\n\t\t [0.0, 0.5, 1.5, 2.5, 1.5])\n\n\t\tassert_equal(len(signal.convolve(x, h)), 5)", "def circular_kernel(radius):\n\n width = 2*radius + 1\n kernel = np.zeros((width, width), np.uint8)\n for i in range(0, width):\n for j in range(0, width):\n if (i - radius) ** 2 + (j - radius) ** 2 <= radius**2:\n kernel[i][j] = int(1)\n return kernel", "def ndcircconv(x, h):\n n = x.shape\n m = h.shape\n k = np.array(n) + np.array(m) - 1\n return np.real(sp.fft.ifftn(sp.fft.fftn(h, s=k) * sp.fft.fftn(x, s=k))).flat", "def test_convol(self):\n for sigma in [2, 15 / 8.]:\n ksize = int(8 * sigma + 1)\n x = numpy.arange(ksize) - (ksize - 1.0) / 2.0\n gaussian = numpy.exp(-(x / sigma) ** 2 / 2.0).astype(numpy.float32)\n gaussian /= gaussian.sum(dtype=numpy.float32)\n gpu_filter = pyopencl.array.to_device(self.queue, gaussian)\n t0 = time.time()\n k1 = self.program.horizontal_convolution(self.queue, self.shape, self.wg,\n self.gpu_in.data, self.gpu_tmp.data, gpu_filter.data, numpy.int32(ksize), self.IMAGE_W, self.IMAGE_H)\n k2 = self.program.vertical_convolution(self.queue, self.shape, self.wg,\n self.gpu_tmp.data, self.gpu_out.data, gpu_filter.data, numpy.int32(ksize), self.IMAGE_W, self.IMAGE_H)\n res = self.gpu_out.get()\n k2.wait()\n t1 = time.time()\n ref = my_blur(self.input, gaussian)\n # ref = scipy.ndimage.gaussian_filter(self.input, sigma, mode=\"reflect\")\n t2 = time.time()\n delta = abs(ref - res).max()\n if ksize % 2 == 0: # we have a problem with even kernels !!!\n self.assertLess(delta, 50, \"sigma= %s delta=%s\" % (sigma, delta))\n else:\n self.assertLess(delta, 1e-4, \"sigma= %s delta=%s\" % (sigma, delta))\n logger.info(\"sigma= %s delta=%s\" % (sigma, delta))\n if self.PROFILE:\n logger.info(\"Global execution time: CPU %.3fms, GPU: %.3fms.\" % (1000.0 * (t2 - t1), 1000.0 * (t1 - t0)))\n logger.info(\"Horizontal convolution took %.3fms and vertical convolution took %.3fms\" % (1e-6 * (k1.profile.end - k1.profile.start),\n 1e-6 * (k2.profile.end - k2.profile.start)))", "def convolution_exact(nit, Y, U, nu=1):\n\n\n Ct = np.zeros((9,20))\n if nit >= nu:\n for k in range(nu,nit):\n Ct[0,-2] += Y[0,k]*U[-2,nit-k]\n Ct[1,-3] += Y[1,k]*U[-3,nit-k]\n Ct[2,-3] += Y[2,k]*U[-3,nit-k]\n Ct[3,-5] += Y[3,k]*U[-5,nit-k]\n Ct[4,1] += Y[4,k]*U[1,nit-k]\n Ct[5,2] += Y[5,k]*U[2,nit-k]\n Ct[6,2] += Y[6,k]*U[2,nit-k]\n Ct[7,4] += Y[7,k]*U[4,nit-k]\n Ct[8,3] += Y[8,k]*U[3,nit-k]\n\n return Ct", "def convolve_1d(x, k):\n y=np.zeros_like(x)\n\n \"\"\"\n 
*******************************************\n *** TODO: write code to perform convolution\n *******************************************\n\n The output should be the same size as the input\n You can assume zero padding, and an odd-sized kernel\n \"\"\"\n \n #Retrieve sizes for loops\n vector_size = x.size\n kernel_size = k.size\n half_size = int((kernel_size-1)/2)\n \n #Create a temporary array that is zero-padded at the front and back for edges\n results=np.zeros(vector_size+(2*half_size))\n for i in range(0, vector_size):\n results[half_size+i] = x[i]\n \n #Swap the kernel\n j = kernel_size-1\n for i in range(0,half_size):\n temp = k[i]\n k[i] = k[j]\n k[j] = temp\n j -= 1\n\n #Calculate the convolution for each index and output into results\n for i in range(0,vector_size):\n accumulator = 0;\n for j in range(0,kernel_size):\n accumulator += (results[i+j]*k[j])\n y[i] = accumulator\n\n\n \"\"\"\n *******************************************\n \"\"\"\n\n return y", "def convolution_with_numpy(x: np.ndarray, W: np.ndarray, stride: int = 1, pad: int = 0) \\\n -> np.ndarray:\n n, c_i, h_i, w_i = x.shape\n c_o, c_i, h_k, w_k = W.shape\n\n if h_k > h_i or w_k > w_i:\n raise AssertionError('The height and width of x must be smaller than W')\n\n if stride > (h_i - h_k + 2 * pad + 1) or stride > (w_i - w_k + 2 * pad + 1):\n raise AssertionError('The value of stride must be smaller than output tensor size')\n\n h_o = math.floor((h_i - h_k + 2 * pad) / float(stride)) + 1\n w_o = math.floor((w_i - w_k + 2 * pad) / float(stride)) + 1\n\n if pad > 0:\n new_x = np.zeros((n, c_i, h_i + 2 * pad, w_i + 2 * pad), dtype=np.float32)\n for nn in range(n):\n for cc_i in range(c_i):\n new_x[nn][cc_i] = np.pad(x[nn][cc_i], pad, 'constant')\n x = new_x\n\n result = np.zeros((n, c_o, h_o, w_o), dtype=np.float32)\n\n for nn in range(n):\n for cc_i in range(c_i):\n for cc_o in range(c_o):\n for h in range(h_o):\n for w in range(w_o):\n for k_h in range(h_k):\n for k_w in range(w_k):\n result[nn, cc_o, h, w] += \\\n x[nn][cc_i][h * stride + k_h][w * stride + k_w] * \\\n W[cc_o][cc_i][k_h][k_w]\n\n return result", "def convolve(array,kernel,edge_degree=1,fit_width=2):\n\n import numpy as np\n import pdb\n import tayph.functions as fun\n from tayph.vartests import typetest,postest,dimtest\n typetest(edge_degree,int,'edge_degree in ops.convolve()')\n typetest(fit_width,int,'edge_degree in ops.convolve()')\n typetest(array,[list,np.ndarray])\n typetest(kernel,[list,np.ndarray])\n dimtest(array,[0],'array in ops.convolve()')\n dimtest(kernel,[0],'array in ops.convolve()')\n postest(edge_degree,'edge_degree in ops.convolve()')\n postest(fit_width,'edge_degree in ops.convolve()')\n\n array = np.array(array)\n kernel= np.array(kernel)\n\n if len(kernel) >= len(array)/4.0:\n raise Exception(f\"Error in ops.convolve(): Kernel length is larger than a quarter of the array ({len(kernel)}, {len(array)}). Can't extrapolate over that length. 
And you probably don't want to be doing a convolution like that, anyway.\")\n\n if len(kernel) % 2 != 1:\n raise Exception('Error in ops.convolve(): Kernel needs to have an odd number of elements.')\n\n #Perform polynomial fits at the edges.\n x=fun.findgen(len(array))\n fit_left=np.polyfit(x[0:len(kernel)*2],array[0:len(kernel)*2],edge_degree)\n fit_right=np.polyfit(x[-2*len(kernel)-1:-1],array[-2*len(kernel)-1:-1],edge_degree)\n\n #Pad both the x-grid (onto which the polynomial is defined)\n #and the data array.\n pad=fun.findgen((len(kernel)-1)/2)\n left_pad=pad-(len(kernel)-1)/2\n right_pad=np.max(x)+pad+1\n left_array_pad=np.polyval(fit_left,left_pad)\n right_array_pad=np.polyval(fit_right,right_pad)\n\n #Perform the padding.\n x_padded = np.append(left_pad , x)\n x_padded = np.append(x_padded , right_pad) #Pad the array with the missing elements of the kernel at the edge.\n array_padded = np.append(left_array_pad,array)\n array_padded = np.append(array_padded,right_array_pad)\n\n #Reverse the kernel because np.convol does that automatically and I don't want that.\n #(Imagine doing a derivative with a kernel [-1,0,1] and it gets reversed...)\n kr = kernel[::-1]\n #The valid keyword effectively undoes the padding, leaving only those values for which the kernel was entirely in the padded array.\n #This thus again has length equal to len(array).\n return np.convolve(array_padded,kr,'valid')", "def convolution(image, kernel):\n kh = kernel.shape[0] #kernel height\n kw = kernel.shape[1] #kernel width\n khm = math.floor(kh/2) #half of kernel height\n kwm = math.floor(kw/2) #half of kernel width\n ih = image.shape[0] #image height\n iw = image.shape[1] #image width\n #make an image frameless\n im_temp = np.zeros((ih+kh, iw+kw))\n im_temp[khm:ih+khm, kwm:iw+kwm] = image\n im_temp[0:khm, kwm:iw+kwm] = image[0:khm, :]\n im_temp[ih+khm:ih+2*khm, kwm:iw+kwm] = image[ih-khm:ih, :]\n im_temp[khm:ih+khm:, 0:kwm] = image[:, 0:kwm]\n im_temp[khm:ih+khm, iw+kwm:iw+2*kwm] = image[:, iw-kwm:iw]\n #create a new image to store the convoluted image\n convoluted = np.zeros((ih, iw))\n #convolute an image with a flipped kernel\n for i in range(ih):\n for j in range(iw):\n weights = 0\n for k in range(kh):\n for l in range(kw):\n kk = kh - 1 - k\n ll = kw - 1 - l\n weights = weights + im_temp[i+k, j+l] * kernel[kk,ll] \n convoluted[i,j] = weights\n return convoluted", "def convolve(self, img):", "def makeConvolutionKernel(xobs, yobs, detector, psf):\n\n half=detector.nPix/2\n xx,yy=np.meshgrid((np.arange(detector.nPix)-half)*detector.pixScale,(np.arange(detector.nPix)-half)*detector.pixScale)\n if(psf.atmosFWHM > 0):\n atmos_sigma=psf.atmosFWHM/(2.*np.sqrt(2.*np.log(2.)))\n if(detector.vSampConvolve): # PSF and Fiber convolution\n psfArr=np.exp(-(xx**2 + yy**2)/(2.*atmos_sigma**2))\n fibArrs=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n fibArrs[sel]=1.\n kernel=np.array([scipy.signal.fftconvolve(psfArr,fibArrs[ii],mode=\"same\") for ii in range(detector.nVSamp)])\n else:\n # this is basically the psf convolved with a delta function at the center of each fiber\n 
kernel=np.array([np.exp(-((xx-pos[0])**2 + (yy-pos[1])**2)/(2.*atmos_sigma**2)) for pos in zip(xobs,yobs)])\n else:\n # Fiber only\n kernel=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n kernel[sel]=1.\n \n return kernel", "def convolve(data, kernel):\n\n if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise TypeError('Input data must be a 2D numpy array.')\n\n if not isinstance(kernel, np.ndarray) or data.ndim != 2:\n raise TypeError('Input kernel must be a 2D numpy array.')\n\n return convolve_fft(data, kernel, boundary='wrap', crop=False,\n nan_treatment='fill', normalize_kernel=False)", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n # START YOUR CODE HERE ### (You can change anything inside this block)\n\n conv_result = im\n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n # Visualize FFT\n plt.subplot(1, 5, 3)\n # Visualize FFT kernel\n plt.subplot(1, 5, 4)\n # Visualize filtered FFT image\n plt.subplot(1, 5, 5)\n # Visualize filtered spatial image\n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def Convolution(image, convFilter):\r\n startDistance = math.sqrt(len(convFilter))//2\r\n # convFilter = convFilter.reverse()\r\n if math.sqrt(len(convFilter))%2 != 1:\r\n print(\"Not Valide filter size\")\r\n return\r\n length = int(math.sqrt(len(convFilter)))\r\n arr = np.asarray(image.shape)\r\n out = np.zeros(arr, dtype=np.uint8)\r\n for k in range(arr[2]): # Channels\r\n for i in range(arr[0]): # Columns/x\r\n for j in range(arr[1]): # Rows/y\r\n sx = i-startDistance\r\n sy = j-startDistance\r\n temp = 0\r\n for y in range(length):\r\n for x in range(length):\r\n # if i < 0 or j < 0 or i\r\n deltaX = sx + x\r\n deltaY = sy + y\r\n if deltaX < 0 or deltaY < 0 or deltaX >= arr[0] or deltaY >= arr[1]:\r\n pixel = image[i, j, k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n else:\r\n pixel = image[int(deltaX), int(deltaY), k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n # print(\"pixel: \", pixel, \" fileter: \", filter)\r\n temp = temp + value / len(convFilter)\r\n # print(\"+\", value)\r\n # print(\"===\", temp)\r\n out[i, j, k] = temp\r\n return out", "def _fft_convolve2d(x, y):\n pad = np.array(x.shape) - np.array(y.shape)\n if pad[0] % 2 == 0:\n rb, ra = int(pad[0]/2)+1, int(pad[0]/2)-1\n else:\n rb, ra = int(np.ceil(pad[0]/2)), int(np.floor(pad[0]/2))\n if pad[1] % 2 == 0:\n cb, ca = int(pad[1]/2)+1, int(pad[1]/2)-1\n else:\n cb, ca = int(np.ceil(pad[1]/2)), int(np.floor(pad[1]/2))\n pad_width = ((rb, ra), (cb, ca))\n py = np.pad(y, pad_width, mode=\"constant\")\n\n fr = fft2(x)\n fr2 = fft2(np.flipud(np.fliplr(py)))\n m,n = fr.shape\n cc = np.real(ifft2(fr*fr2))\n cc = np.roll(cc, int(-m/2+1), axis=0)\n cc = np.roll(cc, int(-n/2+1), axis=1)\n return cc", "def smart_convolve(vec, mask, mode='full'):\n \n 
case_short_mask = len(mask) <= 8\n case_not_power_of_2 = not numpy_extension.is_power_of_2(len(vec))\n case_naive = case_short_mask or case_not_power_of_2\n case_fft = not case_naive\n \n if case_naive:\n return np.convolve(vec, mask, mode)\n elif case_fft:\n return sp.signal.fftconvolve(vec, mask, mode)", "def convolve(im, kernel):\n if (len(im.shape)==2):\n im = np.expand_dims(im, 2)\n H, W, B = im.shape\n imc = np.zeros((H, W, B))\n for band in range(B):\n imc[:, :, band] = sps.correlate2d(im[:, :, band], kernel, mode='same')\n return imc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asserts the same organization can be linked to several identity providers
def test_one_organization_many_identity_providers(self):
    IdpOrganizationAssociation.objects.create(
        organization=self.organization,
        idp_identifier="https://some-other-idp.com/entity/id/",
    )

    IdpOrganizationAssociation.objects.create(
        organization=self.organization,
        idp_identifier="https://my-idp.com/entity/id/",
    )

    self.assertEqual(
        IdpOrganizationAssociation.objects.filter(
            organization=self.organization
        ).count(),
        3,
    )
[ "def test_one_identity_provider_many_organizations(self):\n with self.assertRaises(ValidationError) as exception_context_manager:\n IdpOrganizationAssociation.objects.create(\n organization=OrganizationFactory(),\n idp_identifier=self.idp_entity_id,\n )\n raised_exception = exception_context_manager.exception\n self.assertListEqual(\n raised_exception.messages,\n [\n \"Idp organization association with this Identity provider ID already exists.\",\n ],\n )", "def test_organizations(self):\n self.assert_requires_auth(self.instance.organizations)", "def testOrgAdminsForOrg(self):\n org_admin_properties = {'org_admin_for': [self.foo_org.key()],\n 'is_org_admin': True}\n\n foo_org_admin1 = seeder_logic.seed(GCIProfile, org_admin_properties)\n foo_org_admin2 = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n org_admin_properties['org_admin_for'] = [self.bar_org.key()]\n bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n # Check for self.foo_org (two admins)\n expected = [foo_org_admin1.key(), foo_org_admin2.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.foo_org)]\n self.assertEqual(expected, actual)\n\n # Check for self.bar_org (just one admin)\n expected = [bar_org_admin.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.bar_org)]\n self.assertEqual(expected, actual)", "def test_get_organization_memberships(self):\n pass", "def test_change_organization(self):\n pass", "def test_organization_issues(self):\n self.assert_requires_auth(self.instance.organization_issues, \"org\")", "def test_organization_id_get(self):\n pass", "def test_post_organization_memberships(self):\n pass", "def test_delete_organization_memberships(self):\n pass", "def verify_github_org_membership():\n try:\n username = g.github_user\n token = g.github_token\n except AttributeError:\n raise GitHubAuthenticationError()\n\n # Access the user's organization memberships (need to iterate)\n # https://developer.github.com/v3/orgs/#list-your-organizations\n org_data = iter_github_endpoint(\n 'https://api.github.com/user/orgs',\n auth=(username, token),\n headers={'Accept': 'application/vnd.github.v3+json'}\n )\n\n org_list = [org['login'] for org in org_data]\n if current_app.config['AUTHORIZED_GITHUB_ORG'] not in org_list:\n raise GitHubAuthorizationError()", "def test_getting_affiliation_invitations_for_the_org(app, client, jwt, session, keycloak_mock, business_mock,\n stan_server):\n orig_val_max_number_of_orgs = app.config.get('MAX_NUMBER_OF_ORGS')\n app.config.update(MAX_NUMBER_OF_ORGS=10)\n # setup all the required data\n headers, org_id_0a, org_id_0b, business_identifier = setup_affiliation_invitation_data(client,\n jwt,\n session,\n keycloak_mock)\n new_org_ids, new_business_identifiers = setup_additional_affiliation_invitation_data(client,\n jwt,\n session,\n keycloak_mock)\n\n _create_affiliations_for_test(client, headers,\n org_id1=new_org_ids[0],\n org_id2=new_org_ids[1],\n org_id3=new_org_ids[2],\n org_id4=new_org_ids[3],\n business_identifier1=business_identifier,\n business_identifier2=new_business_identifiers[0])\n\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n expected_org_id = new_org_ids[0]\n url = f'/api/v1/affiliationInvitations?orgId={expected_org_id}&businessDetails=True'\n affiliation_invitations_response = client.get(url, headers=headers)\n affiliation_invitations_dict: dict = json.loads(affiliation_invitations_response.data)\n affiliation_invitations = 
affiliation_invitations_dict['affiliationInvitations']\n\n assert len(affiliation_invitations) == 2 # should be two, one for 'toOrg' other for 'fromOrg'\n assert affiliation_invitations[0]['toOrg']['id'] == expected_org_id \\\n or affiliation_invitations[0]['fromOrg']['id'] == expected_org_id\n assert affiliation_invitations[1]['toOrg']['id'] == expected_org_id \\\n or affiliation_invitations[1]['fromOrg']['id'] == expected_org_id\n\n app.config.update(MAX_NUMBER_OF_ORGS=orig_val_max_number_of_orgs)", "def test_organization_id_resource_get(self):\n pass", "def test_organization_id_public_get(self):\n pass", "def test_organization_id_delete(self):\n pass", "def testQueryAllMentorKeysForOrg(self):\n #Since there are no mentors assigned to foo_org or bar_org, an empty list\n #should be returned.\n expected_keys = []\n actual_keys = profile_logic.queryAllMentorsKeysForOrg(self.foo_org)\n self.assertEqual(expected_keys, actual_keys)\n\n actual_keys = profile_logic.queryAllMentorsKeysForOrg(self.bar_org)\n self.assertEqual(expected_keys, actual_keys)\n\n mentor_properties = {'mentor_for': [self.foo_org.key()], 'is_mentor': True}\n foo_mentors = seeder_logic.seedn(GCIProfile, 5, mentor_properties)\n\n org_admin_properties = {'org_admin_for': [self.foo_org.key()],\n 'mentor_for': [self.foo_org.key()],\n 'is_mentor': True, 'is_org_admin': True}\n foo_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n mentor_properties['mentor_for'] = [self.bar_org.key()]\n bar_mentors = seeder_logic.seedn(GCIProfile, 5, mentor_properties)\n\n org_admin_properties['org_admin_for'] = [self.bar_org.key()]\n org_admin_properties['mentor_for'] = [self.bar_org.key()]\n bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n expected = [mentor.key() for mentor in foo_mentors] + [foo_org_admin.key()]\n actual = profile_logic.queryAllMentorsKeysForOrg(self.foo_org)\n self.assertEqual(expected, actual)\n\n expected = [mentor.key() for mentor in bar_mentors] + [bar_org_admin.key()]\n\n actual = profile_logic.queryAllMentorsKeysForOrg(self.bar_org)\n self.assertEqual(expected, actual)", "def test_get_all_orgs(self):\n expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']]\n # add SiteConfiguration to database\n SiteConfigurationFactory.create(\n site=self.site,\n site_values=self.test_config1\n )\n SiteConfigurationFactory.create(\n site=self.site2,\n site_values=self.test_config2\n )\n\n # Test that the default value is returned if the value for the given key is not found in the configuration\n self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)", "def test_naming_restrictions(self):\n org1 = self.organization_manager.create(self.admin_token, 'Org 1')\n self.assertRaises(facade.models.ModelDataValidationError, self.organization_manager.create, self.admin_token, 'Org 1')\n\n org2 = self.organization_manager.create(self.admin_token, 'Org 2', {'parent' : org1.id})\n org3 = self.organization_manager.create(self.admin_token, 'Sales Department', {'parent' : org1.id})\n self.assertRaises(facade.models.ModelDataValidationError, self.organization_manager.create, self.admin_token, 'Sales Department', {'parent' : org1.id})\n org4 = self.organization_manager.create(self.admin_token, 'Sales Department', {'parent' : org2.id})", "def test_organization_resources_get(self):\n pass", "def test_organization_id_put(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asserts the same identity provider cannot be linked to several organizations
def test_one_identity_provider_many_organizations(self):
    with self.assertRaises(ValidationError) as exception_context_manager:
        IdpOrganizationAssociation.objects.create(
            organization=OrganizationFactory(),
            idp_identifier=self.idp_entity_id,
        )
    raised_exception = exception_context_manager.exception
    self.assertListEqual(
        raised_exception.messages,
        [
            "Idp organization association with this Identity provider ID already exists.",
        ],
    )
[ "def test_one_organization_many_identity_providers(self):\n IdpOrganizationAssociation.objects.create(\n organization=self.organization,\n idp_identifier=\"https://some-other-idp.com/entity/id/\",\n )\n\n IdpOrganizationAssociation.objects.create(\n organization=self.organization,\n idp_identifier=\"https://my-idp.com/entity/id/\",\n )\n\n self.assertEqual(\n IdpOrganizationAssociation.objects.filter(\n organization=self.organization\n ).count(),\n 3,\n )", "def testOrgAdminsForOrg(self):\n org_admin_properties = {'org_admin_for': [self.foo_org.key()],\n 'is_org_admin': True}\n\n foo_org_admin1 = seeder_logic.seed(GCIProfile, org_admin_properties)\n foo_org_admin2 = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n org_admin_properties['org_admin_for'] = [self.bar_org.key()]\n bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n # Check for self.foo_org (two admins)\n expected = [foo_org_admin1.key(), foo_org_admin2.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.foo_org)]\n self.assertEqual(expected, actual)\n\n # Check for self.bar_org (just one admin)\n expected = [bar_org_admin.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.bar_org)]\n self.assertEqual(expected, actual)", "def test_organizations(self):\n self.assert_requires_auth(self.instance.organizations)", "def test_organization_issues(self):\n self.assert_requires_auth(self.instance.organization_issues, \"org\")", "def test_delete_organization_memberships(self):\n pass", "def test_get_organization_memberships(self):\n pass", "def test_account_alias_list_missing_group(self):\n self.assertNotIn(\"notanaccountalias\", EFConfig.ACCOUNT_ALIAS_LIST)", "def test_change_organization(self):\n pass", "def test_organization_id_delete(self):\n pass", "def test_organization_id_get(self):\n pass", "def test_service_groups_missing_group(self):\n self.assertNotIn(\"not_a_service_group\", EFConfig.SERVICE_GROUPS)", "def test_naming_restrictions(self):\n org1 = self.organization_manager.create(self.admin_token, 'Org 1')\n self.assertRaises(facade.models.ModelDataValidationError, self.organization_manager.create, self.admin_token, 'Org 1')\n\n org2 = self.organization_manager.create(self.admin_token, 'Org 2', {'parent' : org1.id})\n org3 = self.organization_manager.create(self.admin_token, 'Sales Department', {'parent' : org1.id})\n self.assertRaises(facade.models.ModelDataValidationError, self.organization_manager.create, self.admin_token, 'Sales Department', {'parent' : org1.id})\n org4 = self.organization_manager.create(self.admin_token, 'Sales Department', {'parent' : org2.id})", "def test_post_organization_memberships(self):\n pass", "def verify_github_org_membership():\n try:\n username = g.github_user\n token = g.github_token\n except AttributeError:\n raise GitHubAuthenticationError()\n\n # Access the user's organization memberships (need to iterate)\n # https://developer.github.com/v3/orgs/#list-your-organizations\n org_data = iter_github_endpoint(\n 'https://api.github.com/user/orgs',\n auth=(username, token),\n headers={'Accept': 'application/vnd.github.v3+json'}\n )\n\n org_list = [org['login'] for org in org_data]\n if current_app.config['AUTHORIZED_GITHUB_ORG'] not in org_list:\n raise GitHubAuthorizationError()", "def test_assert_can_join(users, groups): # noqa\n\n # Non-auditor can join non-audited group as owner.\n assert assert_can_join(groups[\"team-infra\"], users[\"zay@a.co\"], role=\"owner\")\n\n # Auditor can join 
non-audited group as owner.\n assert assert_can_join(groups[\"team-infra\"], users[\"zorkian@a.co\"], role=\"owner\")\n\n # Non-auditor can NOT join audited group as owner.\n with pytest.raises(UserNotAuditor):\n assert not assert_can_join(groups[\"serving-team\"], users[\"zay@a.co\"], role=\"owner\")\n\n # Non-auditor can join audited group as member.\n assert assert_can_join(groups[\"serving-team\"], users[\"zay@a.co\"])\n\n # Group with non-auditor owner can NOT join audited group.\n with pytest.raises(UserNotAuditor):\n assert not assert_can_join(groups[\"serving-team\"], groups[\"tech-ops\"])\n\n # Group with auditor owner can join audited group.\n assert assert_can_join(groups[\"serving-team\"], groups[\"sad-team\"])\n\n # Group with non-auditor owner can join non-audited group.\n assert assert_can_join(groups[\"team-infra\"], groups[\"tech-ops\"])\n\n # Group with auditor owner, but sub-group with non-auditor owner, can NOT join audited group.\n with pytest.raises(UserNotAuditor):\n assert not assert_can_join(groups[\"audited-team\"], groups[\"serving-team\"])", "def test_plonegroupOrganizationRemoved_4(self):\n # set uid in dict\n self.portal['acontent1'].pg_organization = {'uid': self.contacts[0].UID()}\n view = self.portal.restrictedTraverse(\n '{0}/{1}/department1/delete_confirmation'.format(DEFAULT_DIRECTORY_ID, PLONEGROUP_ORG))\n self.assertRaises(LinkIntegrityNotificationException, view.render)\n storage = ILinkIntegrityInfo(view.REQUEST)\n breaches = storage.getIntegrityBreaches()\n self.assertIn(self.contacts[0], breaches)\n self.assertSetEqual(breaches[self.contacts[0]], set([self.portal['acontent1']]))\n # set uid in list\n self.portal['acontent2'].pg_organization = [self.contacts[1].UID()]\n view = self.portal.restrictedTraverse(\n '{0}/{1}/department2/delete_confirmation'.format(DEFAULT_DIRECTORY_ID, PLONEGROUP_ORG))\n self.assertRaises(LinkIntegrityNotificationException, view.render)\n storage = ILinkIntegrityInfo(view.REQUEST)\n breaches = storage.getIntegrityBreaches()\n self.assertIn(self.contacts[1], breaches)\n self.assertSetEqual(breaches[self.contacts[1]], set([self.portal['acontent2']]))", "def test_fail_on_unequal_cohorts(self):\n response = self.request_bulk_enroll({\n 'identifiers': self.notenrolled_student.username,\n 'action': 'enroll',\n 'email_students': False,\n 'courses': self.course_key,\n 'cohorts': \"cohort1,cohort2\"\n })\n self.assertContains(\n response,\n 'If provided, the cohorts and courses should have equal number of items.',\n status_code=400,\n )", "def test_getting_affiliation_invitations_for_the_org(app, client, jwt, session, keycloak_mock, business_mock,\n stan_server):\n orig_val_max_number_of_orgs = app.config.get('MAX_NUMBER_OF_ORGS')\n app.config.update(MAX_NUMBER_OF_ORGS=10)\n # setup all the required data\n headers, org_id_0a, org_id_0b, business_identifier = setup_affiliation_invitation_data(client,\n jwt,\n session,\n keycloak_mock)\n new_org_ids, new_business_identifiers = setup_additional_affiliation_invitation_data(client,\n jwt,\n session,\n keycloak_mock)\n\n _create_affiliations_for_test(client, headers,\n org_id1=new_org_ids[0],\n org_id2=new_org_ids[1],\n org_id3=new_org_ids[2],\n org_id4=new_org_ids[3],\n business_identifier1=business_identifier,\n business_identifier2=new_business_identifiers[0])\n\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n expected_org_id = new_org_ids[0]\n url = f'/api/v1/affiliationInvitations?orgId={expected_org_id}&businessDetails=True'\n 
affiliation_invitations_response = client.get(url, headers=headers)\n affiliation_invitations_dict: dict = json.loads(affiliation_invitations_response.data)\n affiliation_invitations = affiliation_invitations_dict['affiliationInvitations']\n\n assert len(affiliation_invitations) == 2 # should be two, one for 'toOrg' other for 'fromOrg'\n assert affiliation_invitations[0]['toOrg']['id'] == expected_org_id \\\n or affiliation_invitations[0]['fromOrg']['id'] == expected_org_id\n assert affiliation_invitations[1]['toOrg']['id'] == expected_org_id \\\n or affiliation_invitations[1]['fromOrg']['id'] == expected_org_id\n\n app.config.update(MAX_NUMBER_OF_ORGS=orig_val_max_number_of_orgs)", "def testOnlyACorrectOrgAdminCanEditAnrOrgProfilePage(self):\n self.timeline.orgSignup()\n #make the current user to be a mentor for self.org and test for 403.\n self.data.createMentor(self.org)\n url = '/gsoc/profile/organization/' + self.org.key().name()\n self.timeline.orgSignup()\n response = self.get(url)\n self.assertResponseForbidden(response)\n\n from soc.modules.gsoc.models.organization import GSoCOrganization\n other_organization = seeder_logic.seed(GSoCOrganization)\n self.data.createOrgAdmin(other_organization)\n url = '/gsoc/profile/organization/' + self.org.key().name()\n response = self.get(url)\n self.assertResponseForbidden(response)\n\n #make the current logged in user to be admin for self.org.\n self.data.createOrgAdmin(self.org)\n self.gsoc.allocations_visible = False\n self.gsoc.put()\n\n url = '/gsoc/profile/organization/' + self.org.key().name()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n\n context = response.context\n self.assertEqual(context['page_name'], 'Organization profile')\n self.assertTrue('org_home_page_link' in context)\n self.assertTrue('page_name' in context)\n self.assertFalse('slot_transfer_page_link' in context)\n\n self.gsoc.allocations_visible = True\n self.gsoc.put()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n self.assertTrue('slot_transfer_page_link' in response.context)\n\n self.timeline.studentsAnnounced()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n self.assertFalse('slot_transfer_page_link' in response.context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each test case, initialize a new KeywordProcessor. Add the keywords from the test case to the KeywordProcessor. Extract keywords and check if they match the expected result for the test case.
def test_extract_keywords(self):
    for test_id, test_case in enumerate(self.test_cases):
        keyword_processor = KeywordProcessor()
        keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])
        keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])
        self.assertEqual(keywords_extracted, test_case['keywords'],
                         "keywords_extracted don't match the expected results for test case: {}".format(test_id))
[ "def test_extract_keywords_case_sensitive(self):\n for test_id, test_case in enumerate(self.test_cases):\n keyword_processor = KeywordProcessor(case_sensitive=True)\n keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])\n keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])\n self.assertEqual(keywords_extracted, test_case['keywords_case_sensitive'],\n \"keywords_extracted don't match the expected results for test case: {}\".format(test_id))", "def test_matches_keywords(self):\n # Test case 1\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n txt = \"hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 2\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical Protein\"}]\n txt = \"Hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 3\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 4\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 5\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, \"Keyword\" : \"polymerase\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 6\n txt = \"hypothetical protein\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, \"Keyword\" : \"polymerase\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 7\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 8\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 9\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 10\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"},\n {\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\r\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\r\n return (keywords_found)", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\n return(keywords_found)", "def test_extract_keywords():\n keywordsChief = KeywordsChief(\"test_data/keywords.yaml\")\n\n assert keywordsChief.extract_keywords([\"\"]) == {}\n assert 
keywordsChief.extract_keywords([\"unknown\"]) == {}\n assert keywordsChief.extract_keywords([\"python\"]) == {\"python\": 1}\n assert keywordsChief.extract_keywords([\"ml\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"machine-learning\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"unknown\"]) == \\\n {'python': 1, 'functional-programming': 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"ml\"]) == \\\n {'python': 1, 'functional-programming': 1, 'machine-learning': 1}", "def test_parse_keywords(self):\n # Test Case 1\n keywords = 'hypothetical protein'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": False, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 2\n keywords = \"'hypothetical protein'\"\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 3\n keywords = '\"hypothetical protein\"'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 4\n keywords = 'DNA Polymerase \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": False, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": False, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 5\n keywords = 'NOT hypothetical protein'\n exp = [{\"Not\": True, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": False, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 6\n keywords = 'hypothetical NOT protein'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": True, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 7\n keywords = 'NOT \"hypothetical protein\"'\n exp = [{\"Not\": True, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 8\n keywords = 'NOT DNA Polymerase NOT \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 9\n keywords = 'NOT (DNA Polymerase) NOT \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 10\n keywords = 'NOT -DNA Polymerase) NOT $\"hypothetical protein\" --Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 11\n keywords = 'NOT -DNA Polymerase) NOT $\"hypothetical-protein\" 
--Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical-protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 12\n keywords = \"NOT -DNA Polymerase) NOT $'hypothetical-protein' --Glutimate\"\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical-protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 13\n keywords = None\n exp = [{\"Not\": False, \"Keyword\" : \"*\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)", "def train(self, documents, **kwargs):\n\n total_data = ' '.join(documents)\n language = kwargs.get('language', 'en')\n max_ngram_size = self.n_gram\n deduplication_thresold = 0.7 # 0.4 ->\n deduplication_algo = 'seqm' #\n windowSize = 1\n numOfKeywords = self.total_keywords_in_training\n\n custom_kw_extractor = yake.KeywordExtractor(lan=language, \n n=max_ngram_size, \n dedupLim=deduplication_thresold,\n dedupFunc=deduplication_algo, \n windowsSize=windowSize,\n top=numOfKeywords, \n features=None)\n \n self.the_total_keywords = custom_kw_extractor.extract_keywords(total_data)", "def keywords_extraction(config):\n\n kws = {\n 'tfidf': kw_keyword_tfidf\n }\n\n # Prompts the user to select an action\n kw = interaction.ask_action(constants.msg_kw, set(kws.keys()))\n if kw == 's':\n return\n\n result_file = interaction.ask_file(constants.msg_results_cluster)\n kw.extract_keywords(config, result_file)", "def test_get_keywords_count_method():\n keywordsChief1 = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief1.get_keywords_count() == 6\n\n keywordsChief2 = KeywordsChief(\"test_data/keywords_ngram2.yaml\")\n assert keywordsChief2.get_keywords_count() == 6\n\n keywordsChief3 = KeywordsChief(\"test_data/keywords_ngram3.yaml\")\n assert keywordsChief3.get_keywords_count() == 6", "def create_keywordProcessor(list_of_terms, remove_stopwords=True, \n custom_stopword_list=[\"\"]):\n # create a KeywordProcessor\n keyword_processor = KeywordProcessor()\n keyword_processor.add_keywords_from_list(list_of_terms)\n\n # remove English stopwords if requested\n if remove_stopwords == True:\n keyword_processor.remove_keywords_from_list(stopwords.words('english'))\n\n # remove custom stopwords\n keyword_processor.remove_keywords_from_list(custom_stopword_list)\n \n return(keyword_processor)", "def create_keywordProcessor(list_of_terms, remove_stopwords=True,\r\n custom_stopword_list=[\"\"]):\r\n # create a KeywordProcessor\r\n keyword_processor = KeywordProcessor()\r\n keyword_processor.add_keywords_from_list(list_of_terms)\r\n\r\n # remove English stopwords if requested\r\n if remove_stopwords == True:\r\n keyword_processor.remove_keywords_from_list(stopwords.words('english'))\r\n\r\n # remove custom stopwords\r\n keyword_processor.remove_keywords_from_list(custom_stopword_list)\r\n\r\n return (keyword_processor)", "def train(self, documents, **kwargs):\n\n with open('indexList.csv', newline='') as f:\n reader = csv.reader(f)\n index_keywords_ = list(reader)\n \n index_keywords = [] \n for item in index_keywords_:\n a = (item[0], int(item[1]))\n index_keywords.append(a)\n \n \n\n total_data = ' '.join(documents)\n language = kwargs.get('language', 'en')\n 
max_ngram_size = self.n_gram\n deduplication_thresold = 0.4 # 0.4 ->\n deduplication_algo = 'seqm' #\n windowSize = 2\n numOfKeywords = self.total_keywords_in_training\n\n custom_kw_extractor = yake.KeywordExtractor(lan=language, \n n=max_ngram_size, \n dedupLim=deduplication_thresold,\n dedupFunc=deduplication_algo, \n windowsSize=windowSize,\n top=numOfKeywords, \n features=None)\n \n self.the_total_keywords =index_keywords+custom_kw_extractor.extract_keywords(total_data)", "def test_custom_keyword_file_loading():\n keywordsChief = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief._keywords is not None\n # now we know exactly, how many keywords are in\n assert len(keywordsChief._keywords) == 6", "def assign_keywords_to_scrapers(all_keywords):\n mode = Config['SCRAPING'].get('scrapemethod')\n\n num_workers = Config['SCRAPING'].getint('num_workers', 1)\n\n if len(all_keywords) > num_workers:\n kwgroups = chunk_it(all_keywords, num_workers)\n else:\n # thats a little special there :)\n kwgroups = [[kw, ] for kw in all_keywords]\n\n return kwgroups", "def test_keyword_file_check():\n # None is accepted\n keywordsChief1 = KeywordsChief(None)\n assert keywordsChief1._keywords is not None\n assert len(keywordsChief1._keywords) >= 0\n\n # Empty string is accepted as well\n keywordsChief2 = KeywordsChief(\"\")\n assert keywordsChief2._keywords is not None\n assert len(keywordsChief2._keywords) >= 0\n\n # most other types are not accepted\n inputs = [True, False, 42, 1.5, [], {}]\n for keyword_file in inputs:\n with pytest.raises(f8a_tagger.errors.InvalidInputError):\n keywordsChief3 = KeywordsChief(keyword_file)\n print(keywordsChief3)", "def test_ruby_keywords(self):\n self.filename = \"parser_tests/ruby_keywords.txt\"\n self.run_parser()\n expected_keywords = ['def', 'return', 'unless', 'return', 'end']\n self.assertEqual(expected_keywords, self.p.keywords)", "def test_filter_keywords():\n assert KeywordsChief.filter_keyword(\"\") == (\"\", [], [])\n # check how the special chars are filtered/ignored by the filter_keywords() method\n assert KeywordsChief.filter_keyword(\"python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\".python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"python.\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\".python.\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"python_\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_python_\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"___python___\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_._python_._\") == (\"python\", [], [])", "def start_process():\n print(datetime.today(), \"Starting keywords process\")\n df_tweets = tweet_logic._data.get_tweet_keywords()\n\n # Upgrade sentiment into label\n if not df_tweets is None:\n count_tweets = len(df_tweets)\n for index in range(count_tweets):\n row_df = df_tweets.iloc[index]\n tweet_id = row_df['tweet_id']\n cleaned_text = row_df['cleaned_text']\n key_cvaccine, key_pharma = keywords.__keywords_process.get_all_keywords(cleaned_text)\n tweet_logic._data.insert_keywords(str(tweet_id), \", \".join(key_cvaccine), \", \".join(key_pharma))\n\n end1 = '\\n'\n if index+1 != count_tweets:\n end1 = '\\r'\n\n print(str(index+1)+\"/\"+str(count_tweets), \"tweets has been updated\", end = end1)\n\n else:\n print(\"No tweets have been found for upgrade.\")\n\n 
print(datetime.today(), \"Process has been completed.\")", "def test_matches_keyword_pattern_positive():\n assert KeywordsChief.matches_keyword_pattern(\"python\")\n assert KeywordsChief.matches_keyword_pattern(\"ml\")\n assert KeywordsChief.matches_keyword_pattern(\"functional-programming\")\n assert KeywordsChief.matches_keyword_pattern(\"functional_programming\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each test case, initialize a new KeywordProcessor (with case_sensitive=True). Add the keywords from the test case to the KeywordProcessor. Extract keywords and check if they match the expected result for the test case.
def test_extract_keywords_case_sensitive(self):
    for test_id, test_case in enumerate(self.test_cases):
        keyword_processor = KeywordProcessor(case_sensitive=True)
        keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])
        keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])
        self.assertEqual(keywords_extracted, test_case['keywords_case_sensitive'],
                         "keywords_extracted don't match the expected results for test case: {}".format(test_id))
[ "def test_extract_keywords(self):\n for test_id, test_case in enumerate(self.test_cases):\n keyword_processor = KeywordProcessor()\n keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])\n keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])\n self.assertEqual(keywords_extracted, test_case['keywords'],\n \"keywords_extracted don't match the expected results for test case: {}\".format(test_id))", "def test_matches_keywords(self):\n # Test case 1\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n txt = \"hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 2\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical Protein\"}]\n txt = \"Hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 3\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 4\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 5\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, \"Keyword\" : \"polymerase\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 6\n txt = \"hypothetical protein\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, \"Keyword\" : \"polymerase\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 7\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 8\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 9\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 10\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"},\n {\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\r\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\r\n return (keywords_found)", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\n return(keywords_found)", "def test_extract_keywords():\n keywordsChief = KeywordsChief(\"test_data/keywords.yaml\")\n\n assert keywordsChief.extract_keywords([\"\"]) == {}\n assert keywordsChief.extract_keywords([\"unknown\"]) == {}\n assert 
keywordsChief.extract_keywords([\"python\"]) == {\"python\": 1}\n assert keywordsChief.extract_keywords([\"ml\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"machine-learning\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"unknown\"]) == \\\n {'python': 1, 'functional-programming': 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"ml\"]) == \\\n {'python': 1, 'functional-programming': 1, 'machine-learning': 1}", "def test_parse_keywords(self):\n # Test Case 1\n keywords = 'hypothetical protein'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": False, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 2\n keywords = \"'hypothetical protein'\"\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 3\n keywords = '\"hypothetical protein\"'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 4\n keywords = 'DNA Polymerase \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": False, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": False, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 5\n keywords = 'NOT hypothetical protein'\n exp = [{\"Not\": True, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": False, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 6\n keywords = 'hypothetical NOT protein'\n exp = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}, \n {\"Not\": True, \"Keyword\" : \"protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 7\n keywords = 'NOT \"hypothetical protein\"'\n exp = [{\"Not\": True, \"Keyword\" : \"hypothetical protein\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 8\n keywords = 'NOT DNA Polymerase NOT \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 9\n keywords = 'NOT (DNA Polymerase) NOT \"hypothetical protein\" Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 10\n keywords = 'NOT -DNA Polymerase) NOT $\"hypothetical protein\" --Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 11\n keywords = 'NOT -DNA Polymerase) NOT $\"hypothetical-protein\" --Glutimate'\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n 
{\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical-protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 12\n keywords = \"NOT -DNA Polymerase) NOT $'hypothetical-protein' --Glutimate\"\n exp = [{\"Not\": True, \"Keyword\" : \"DNA\"}, \n {\"Not\": False, \"Keyword\" : \"Polymerase\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical-protein\"},\n {\"Not\": False, \"Keyword\" : \"Glutimate\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)\n # Test Case 13\n keywords = None\n exp = [{\"Not\": False, \"Keyword\" : \"*\"}]\n rslt = Annot_Reader.parse_keywords(keywords)\n self.assertTrue(rslt == exp)", "def train(self, documents, **kwargs):\n\n total_data = ' '.join(documents)\n language = kwargs.get('language', 'en')\n max_ngram_size = self.n_gram\n deduplication_thresold = 0.7 # 0.4 ->\n deduplication_algo = 'seqm' #\n windowSize = 1\n numOfKeywords = self.total_keywords_in_training\n\n custom_kw_extractor = yake.KeywordExtractor(lan=language, \n n=max_ngram_size, \n dedupLim=deduplication_thresold,\n dedupFunc=deduplication_algo, \n windowsSize=windowSize,\n top=numOfKeywords, \n features=None)\n \n self.the_total_keywords = custom_kw_extractor.extract_keywords(total_data)", "def keywords_extraction(config):\n\n kws = {\n 'tfidf': kw_keyword_tfidf\n }\n\n # Prompts the user to select an action\n kw = interaction.ask_action(constants.msg_kw, set(kws.keys()))\n if kw == 's':\n return\n\n result_file = interaction.ask_file(constants.msg_results_cluster)\n kw.extract_keywords(config, result_file)", "def test_get_keywords_count_method():\n keywordsChief1 = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief1.get_keywords_count() == 6\n\n keywordsChief2 = KeywordsChief(\"test_data/keywords_ngram2.yaml\")\n assert keywordsChief2.get_keywords_count() == 6\n\n keywordsChief3 = KeywordsChief(\"test_data/keywords_ngram3.yaml\")\n assert keywordsChief3.get_keywords_count() == 6", "def create_keywordProcessor(list_of_terms, remove_stopwords=True, \n custom_stopword_list=[\"\"]):\n # create a KeywordProcessor\n keyword_processor = KeywordProcessor()\n keyword_processor.add_keywords_from_list(list_of_terms)\n\n # remove English stopwords if requested\n if remove_stopwords == True:\n keyword_processor.remove_keywords_from_list(stopwords.words('english'))\n\n # remove custom stopwords\n keyword_processor.remove_keywords_from_list(custom_stopword_list)\n \n return(keyword_processor)", "def create_keywordProcessor(list_of_terms, remove_stopwords=True,\r\n custom_stopword_list=[\"\"]):\r\n # create a KeywordProcessor\r\n keyword_processor = KeywordProcessor()\r\n keyword_processor.add_keywords_from_list(list_of_terms)\r\n\r\n # remove English stopwords if requested\r\n if remove_stopwords == True:\r\n keyword_processor.remove_keywords_from_list(stopwords.words('english'))\r\n\r\n # remove custom stopwords\r\n keyword_processor.remove_keywords_from_list(custom_stopword_list)\r\n\r\n return (keyword_processor)", "def train(self, documents, **kwargs):\n\n with open('indexList.csv', newline='') as f:\n reader = csv.reader(f)\n index_keywords_ = list(reader)\n \n index_keywords = [] \n for item in index_keywords_:\n a = (item[0], int(item[1]))\n index_keywords.append(a)\n \n \n\n total_data = ' '.join(documents)\n language = kwargs.get('language', 'en')\n max_ngram_size = self.n_gram\n deduplication_thresold = 0.4 # 0.4 ->\n 
deduplication_algo = 'seqm' #\n windowSize = 2\n numOfKeywords = self.total_keywords_in_training\n\n custom_kw_extractor = yake.KeywordExtractor(lan=language, \n n=max_ngram_size, \n dedupLim=deduplication_thresold,\n dedupFunc=deduplication_algo, \n windowsSize=windowSize,\n top=numOfKeywords, \n features=None)\n \n self.the_total_keywords =index_keywords+custom_kw_extractor.extract_keywords(total_data)", "def test_custom_keyword_file_loading():\n keywordsChief = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief._keywords is not None\n # now we know exactly, how many keywords are in\n assert len(keywordsChief._keywords) == 6", "def assign_keywords_to_scrapers(all_keywords):\n mode = Config['SCRAPING'].get('scrapemethod')\n\n num_workers = Config['SCRAPING'].getint('num_workers', 1)\n\n if len(all_keywords) > num_workers:\n kwgroups = chunk_it(all_keywords, num_workers)\n else:\n # thats a little special there :)\n kwgroups = [[kw, ] for kw in all_keywords]\n\n return kwgroups", "def test_keyword_file_check():\n # None is accepted\n keywordsChief1 = KeywordsChief(None)\n assert keywordsChief1._keywords is not None\n assert len(keywordsChief1._keywords) >= 0\n\n # Empty string is accepted as well\n keywordsChief2 = KeywordsChief(\"\")\n assert keywordsChief2._keywords is not None\n assert len(keywordsChief2._keywords) >= 0\n\n # most other types are not accepted\n inputs = [True, False, 42, 1.5, [], {}]\n for keyword_file in inputs:\n with pytest.raises(f8a_tagger.errors.InvalidInputError):\n keywordsChief3 = KeywordsChief(keyword_file)\n print(keywordsChief3)", "def test_ruby_keywords(self):\n self.filename = \"parser_tests/ruby_keywords.txt\"\n self.run_parser()\n expected_keywords = ['def', 'return', 'unless', 'return', 'end']\n self.assertEqual(expected_keywords, self.p.keywords)", "def test_filter_keywords():\n assert KeywordsChief.filter_keyword(\"\") == (\"\", [], [])\n # check how the special chars are filtered/ignored by the filter_keywords() method\n assert KeywordsChief.filter_keyword(\"python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\".python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"python.\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\".python.\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_python\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"python_\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_python_\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"___python___\") == (\"python\", [], [])\n assert KeywordsChief.filter_keyword(\"_._python_._\") == (\"python\", [], [])", "def start_process():\n print(datetime.today(), \"Starting keywords process\")\n df_tweets = tweet_logic._data.get_tweet_keywords()\n\n # Upgrade sentiment into label\n if not df_tweets is None:\n count_tweets = len(df_tweets)\n for index in range(count_tweets):\n row_df = df_tweets.iloc[index]\n tweet_id = row_df['tweet_id']\n cleaned_text = row_df['cleaned_text']\n key_cvaccine, key_pharma = keywords.__keywords_process.get_all_keywords(cleaned_text)\n tweet_logic._data.insert_keywords(str(tweet_id), \", \".join(key_cvaccine), \", \".join(key_pharma))\n\n end1 = '\\n'\n if index+1 != count_tweets:\n end1 = '\\r'\n\n print(str(index+1)+\"/\"+str(count_tweets), \"tweets has been updated\", end = end1)\n\n else:\n print(\"No tweets have been found for upgrade.\")\n\n print(datetime.today(), \"Process has been completed.\")", "def 
test_matches_keyword_pattern_positive():\n assert KeywordsChief.matches_keyword_pattern(\"python\")\n assert KeywordsChief.matches_keyword_pattern(\"ml\")\n assert KeywordsChief.matches_keyword_pattern(\"functional-programming\")\n assert KeywordsChief.matches_keyword_pattern(\"functional_programming\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of active constraints at point x.
def active_set_at(self, x: np.ndarray, as_equalities: bool) -> List[Constraint]: return [c.as_equality() if as_equalities else c for c in self.constraints if c.is_active(x)]
[ "def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([c(x) for c in self.constraints])", "def get_active_constraints(self):\n if self.active_constraints_set:\n return self.active_constraints_index\n else:\n raise Exception('Active constraints not set yet!')", "def constraints(self):\n return self._constraints[:]", "def list_constraints(self):\n result = self._query(\"\"\"\n select bq_list_constraints(%s);\n \"\"\", (self.collection_name,))\n return list(map(lambda r: r[0], result))", "def get_pyomo_constraints(self):\n # ToDo: is there a more efficient way to do this\n idx_to_condata = {i: v for v, i in self._condata_to_idx.items()}\n return [idx_to_condata[i] for i in range(len(idx_to_condata))]", "def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.c, x) for c in self.constraints])", "def get_constraints(self) -> List[str]:\n rows = self.db.query(\"select conname from pg_constraint\")\n return [row.conname for row in rows]", "def getConstraintsQueue(self):\r\n #initialize queue\r\n queue = []\r\n #for all variables\r\n for key in CSP.variables:\r\n #for all neighbours\r\n for neighbour in CSP.constraints[key]:\r\n if [neighbour, key] not in queue:\r\n queue.append([key, neighbour])\r\n return queue", "def constraint(self, x):\n return x[0]", "def calc_constraint_at(self, i: int, x: np.ndarray) -> float:\n return self.constraints[i](x)", "def constraints(self) -> Tuple[Constraint]:\n return tuple(self.__constraints)", "def get_vertex_constraints(self):", "def get_constraints(self) -> List[LayoutConstraint]:\n return self._layout_constraints", "def get_constrained_features(self):\n feature_indices = []\n for rule in self.constraint:\n feature_no = [v[0] for v in rule['n']]\n feature_indices += feature_no\n return list(set(feature_indices))", "def rangeconstraints_GET(self):\n app_id = self.request.swagger_data['appid']\n confkey_id = self.request.swagger_data['ckid']\n rangeconstraints = RangeConstraint.query()\\\n .join(ConfigurationKey)\\\n .filter(ConfigurationKey.id == confkey_id)\\\n .join(Application)\\\n .filter(Application.id == app_id)\n return list(map(lambda _: _.as_dict(), rangeconstraints))", "def capp1_constraints(self):\n constraints = []\n for i in range(1, self.x + 1):\n for k in range(1, self.y + 1):\n equation = f\"\\tcapS{i}{k}: \" # Need S to differentiate between the two capacity constraints\n capp1 = []\n for j in range(1, self.z + 1):\n capp1.append(f\"x{i}{k}{j}\")\n equation += \" + \".join(capp1) + f\" - c{i}{k} <= 0\"\n constraints.append(equation)\n capp1_constraints = \"\\n\".join(constraints)\n capp1_constraints += \"\\n\"\n return capp1_constraints", "def bounds(self):\n return [p.bounds for p in self.params]", "def interaction_constraints(self):\n return self._parms.get(\"interaction_constraints\")", "def activate_constraints(self):\n\t\tfor constraint_dict in self.constraints.values():\n\t\t\tconstraint = constraint_dict['constraint']\n\n\t\t\tif constraint_dict['snap']:\n\t\t\t\tconstraint.Snap()\n\t\t\telse:\n\t\t\t\tconstraint.Active = True\n\n\t\treturn True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the approximated gradient of the function at the point x.
def calc_gradient_at(self, x: np.ndarray) -> np.ndarray: return gradient_approximation(self.f, x)
[ "def gradient(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=AutoDiffXd)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n # TODO(eric.cousineau): Consider restricting this in the future to only be\n # a scalar.\n assert y_ad.size == 1 and y_ad.ndim <= 1, (\n \"The output of `function` must be of a scalar or a vector of size 1\")\n y_ad = y_ad.reshape(()) # To scalar.\n return y_ad.item().derivatives()", "def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)", "def evaluateGradient(function,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros(shape(x)) \n for i in range(0,len(x)):\n # Set the step on the correct variable.\n h[i] = epsilon\n # Approximate derivative using central difference approximation.\n res[i] = (function(x + h) - function(x - h)) / (2 * epsilon)\n # Reset step for next iteration.\n h[i] = 0.0\n return res", "def eval_numerical_gradient(f, x, verbose=False, h=1.e-7):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad", "def gradient(x):\n\t\tpass", "def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n # fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n original_value = x.copy()[ix]\n x[ix] = original_value + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = original_value - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = original_value # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print (ix, grad[ix], fxph, fxmh)\n it.iternext() # step to next dimension\n\n return grad", "def f_grad(self, x):\n gradient = []\n\n for key in self.mean_functions:\n gradient.push(self.mean_functions[key][1](x))\n\n return np.array(gradient)", "def getGradient(function):\n def grad(x):\n return evaluateGradient(function,x) \n return grad", "def grad(self, x):\n raise NotImplementedError('Grad oracle is not implemented.')", "def f_grad(self, x):\n return np.zeros((x.shape[0]))", "def eval_numberical_gradient(f, x):\n fx = f(x) #evaluate function value at original point\n grad = np.zeros(x.shape)\n h = 0.00001\n\n #iterate over all indexes in x\n #np.nditer: It inter as follows:\n #------------->\n #...\n #------------->\n #You should know that it.multi_index is the index\n #of the matrix. 
And do not forget to interate\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n print \"Now the iterate begins...\"\n while not it.finished:\n #evaluate function at x+h\n ix = it.multi_index\n old_value = x[ix]\n x[ix] = old_value + h #increment by h\n fxh = f(x) #evaluate f(x+h)\n x[ix] = old_value #restore to previous value!!\n #compute the partial derivative\n grad[ix] = (fxh - fx) / h #the slope\n print \"Now the fxh: \" + str(fxh) + \"\\tfx: \" + str(fx) \n print \"and the grad\"+ str(ix) + \"is \" + str(grad[ix]) + '\\n'\n it.iternext() #step to next dimension\n\n print \"Now the iterates ends...\"\n return grad", "def grad_input(self, x):\n # Compute the gradient of the mean function.\n d_kernel = self.kernel.grad_input(x, self.X)\n d_mean = d_kernel.T.dot(self.alpha)\n # Compute the gradient of the standard deviation function. It is\n # absolutely crucial to note that the predict method returns the\n # variance, not the standard deviation, of the prediction.\n sd = np.sqrt(self.predict(x)[1])\n K_cross = self.kernel.cov(x, self.X)\n M = spla.cho_solve((self.L, True), K_cross.T).ravel()\n d_sd = -d_kernel.T.dot(M) / sd\n return d_mean, d_sd", "def _gradient_terms(self, x):\n # gradient of predictive variance of y\n dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)\n dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)\n graminv_KXx = self._graminv_Kx(x)\n\n d_y_predictive_var_dx = dvar_dx - 2. * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)\n\n # gradient of predictive covariance between integral and (x, y)-pair\n dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))\n qKX_graminv = self._qK_graminv() # (1, N)\n dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)\n d_predictive_cov_dx = dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[0, :, :]\n\n return np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx", "def gradient(self, x, w=None):\n # Transform data using inner preprocess, if defined\n x, y = self._check_input(x)\n self._check_is_fitted()\n self._clear_cache()\n\n x_prc = self._forward_preprocess(x, caching=True)\n if self._grad_requires_forward:\n self._forward(x_prc) # this is called only if required\n return self.backward(w)", "def grad(self, x, apply_bcs=True):\n if self.__objective_gradient is None:\n self.compile_objective_gradient()\n\n self.assign_vector(x, apply_bcs=apply_bcs)\n\n # Evaluate gradient and apply boundary conditions.\n g = assemble(self.__objective_gradient)\n # for bc in self.bcs:\n # bc.apply(g)\n\n return g.array()", "def gradient_descent_update(x, gradx, learning_rate):\n return x - learning_rate * gradx", "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE", "def gradient_approx(self, x, h = 1e-5):\n\t\tx = np.expand_dims(x, axis=-2)\n\t\tx_forward = x.copy()\n\t\tx_forward = x_forward.repeat(repeats=x_forward.shape[-1], axis=-2)\n\t\tdiag_index = np.arange(x_forward.shape[-1])\n\t\tx_forward[:,diag_index, diag_index] += h\n\t\tgrad_true = self.softmax.activation(x_forward) - self.softmax.activation(x)\n\t\treturn grad_true[:,diag_index,diag_index] / h", "def gradient(self,point): \n delta=0.01\n dx=np.array([delta,0,0])\n dy=np.array([0.0,delta,0.0])\n dz=np.array([0.0,0.0,delta])\n distx_m=self.distance_from_point(point-dx)\n distx_p=self.distance_from_point(point+dx)\n disty_m=self.distance_from_point(point-dy)\n 
disty_p=self.distance_from_point(point+dy)\n distz_m=self.distance_from_point(point-dz)\n distz_p=self.distance_from_point(point+dz)\n grad=np.array([(distx_p-distx_m)/(2*delta),(disty_p-disty_m)/(2*delta),(distz_p-distz_m)/(2*delta)])\n return grad" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }