query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
sequencelengths
19
20
metadata
dict
Load sample images for image manipulation. Loads both, ``china`` and ``flower``. Returns
def load_sample_images(): # Try to import imread from scipy. We do this lazily here to prevent # this module from depending on PIL. try: try: from scipy.misc import imread except ImportError: from scipy.misc.pilutil import imread except ImportError: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") ROOT_Dir = os.getcwd() module_path = os.path.join(ROOT_Dir, "images") with open(os.path.join(module_path, 'README.txt')) as f: descr = f.read() filenames = [os.path.join(module_path, filename) for filename in os.listdir(module_path) if filename.endswith(".jpg")] # Load image data for each image in the source folder. images = [imread(filename) for filename in filenames] return Bunch(images=images, filenames=filenames, DESCR=descr)
[ "def load_images(self):\n for f in glob.glob('pix/weather/*.png'):\n base = os.path.splitext(os.path.basename(f))[0]\n self.images[base] = itk.PhotoImage(file=f)", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def _load_images(self):\n if not self.test_set:\n images = []\n masks = []\n for item in self.image_names:\n image = nrrd.read(os.path.join(self.input_folder, 'image', item))[0]\n mask = nrrd.read(os.path.join(self.input_folder, 'mask', item))[0]\n \n images.append(image)\n masks.append(mask)\n \n self.images = images\n self.masks = masks\n else:\n images = []\n for item in self.image_names:\n image = nrrd.read(os.path.join(self.input_folder, 'image', item))[0] \n images.append(image)\n \n self.images = images \n \n print(f\"Loaded {len(self.images)} images and {len(self.masks)} binary masks.\")", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def load_all(self):\n # Discover size of images, so that we can allocate memory efficiently\n # NOTE(rjbruin): assumes that all images have the same size!\n n_images = len(self.samples)\n sample_image = np.array(self.loader(self.samples[0][0]))\n height = sample_image.shape[0]\n width = sample_image.shape[1]\n n_channels = sample_image.shape[2]\n\n self.images = np.zeros((n_images, height, width, n_channels), dtype=\"uint8\")\n try:\n for i, sample in tqdm.tqdm(\n enumerate(self.samples),\n desc=\"Pre-loading dataset\",\n total=len(self.samples),\n ):\n path, _ = sample\n self.images[i] = np.array(self.loader(path))\n except MemoryError:\n raise MemoryError(\n \"Dataset cannot fit in memory! Please run \"\n \"without --ram-dataset or use more memory.\"\n )", "def load_samples(self):\n filename = os.path.join(self.root, self.filename)\n f = gzip.open(filename, \"rb\")\n data_set = pickle.load(f, encoding=\"bytes\")\n f.close()\n images_train = data_set[0][0]\n images_test = data_set[1][0]\n images = np.concatenate((images_train, images_test), axis=0)\n labels_train = data_set[0][1]\n labels_test = data_set[1][1]\n labels = np.concatenate((labels_train, labels_test), axis=0)\n self.dataset_size = labels.shape[0]\n return images, labels", "def load_example_images():\r\n\r\n\tprint(\"Images\")\r\n\r\n\tImage.query.delete()\r\n\r\n\timages = [\r\n \tImage(filesize=800, xdim=1200, ydim=1080, imgtype='jpg', uploaded=datetime.datetime.now(), url='https://res.cloudinary.com/demo/image/upload/Sample.jpg'),\r\n \tImage(filesize=400, xdim=300, ydim=200, imgtype='jpg', uploaded=datetime.datetime.now(), url='http://cdn3.craftsy.com/blog/wp-content/uploads/2015/03/Coneflower.png'),\r\n \tImage(filesize=1000, xdim=1600, ydim=1280, imgtype='png', uploaded=datetime.datetime.now(), url='https://winblogs.azureedge.net/devices/2012/09/808sample.jpg'),\r\n\t Image(filesize=600, xdim=800, ydim=600, imgtype='jpg', uploaded=datetime.datetime.now(), url='http://a4.pbase.com/g9/34/632434/2/150123376.gdGIYwbh.jpg')\r\n\t]\r\n\r\n\tfor image in images:\r\n\t\tdb.session.add(image)\r\n\t\tdb.session.flush()\r\n\r\n\tdb.session.commit()", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def loadFeatures(imageDir, args):\n exampleDict = dict()\n\n if args.sampledImageDir and not os.path.exists(args.sampledImageDir):\n os.makedirs(args.sampledImageDir)\n \n imageFiles = [ file for file in os.listdir(imageDir) if file.endswith(\".jpg\") ]\n for i, imageFile in enumerate(imageFiles):\n fullPath = os.path.join(args.inputdir, imageFile).replace(\"\\\\\", \"/\")\n\n # load greyscale\n image = cv2.imread(fullPath, 0)\n \n # center crop\n image = centerCrop(image, args.centerCropSize, args.centerCropSize)\n if args.sampleResolution < args.centerCropSize:\n image = cv2.resize(image, (args.sampleResolution, args.sampleResolution), interpolation=cv2.INTER_CUBIC)\n \n if args.sampledImageDir:\n cv2.imwrite(os.path.join(args.sampledImageDir, imageFile), image)\n \n exampleId = extractNumericId(imageFile)\n exampleDict[exampleId] = Example(exampleId, image.flatten() / 256.0, None)\n \n if args.maxExamples and i >= args.maxExamples:\n break\n \n return exampleDict", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_datasets(self):\n file_prefix = self._load_datasets_from\n\n self._train_set.load_images(file_prefix + \"_training.pkl\")\n self._test_set.load_images(file_prefix + \"_testing.pkl\")\n\n self.__loaded_datasets = True", "def load_img(self):\n # Return list of images paths\n img_paths = []\n for ext in ('/*.png', '/*.jpg'):\n img_paths.extend(glob_path(config.IMG_REP+ext))\n # Create dictionnary of images paths\n self.img_paths = {file_name(img_paths[i]): img_paths[i]\n for i in range(len(img_paths))}", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def read_images(do_prewhiten=True):\n char_img_map = OrderedDict()\n for character in CHARACTER_ORDER:\n path = DATASET / character\n image_paths = sorted(path.glob('*.jpg'))\n images = facenet.load_data(image_paths, False, False, IMAGE_SIZE, do_prewhiten)\n char_img_map[character] = images\n\n return char_img_map", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def load_images():\n hw = 28 ** 2 # Number of pixels per image\n n = 60000 # Number of images\n\n with gzip.open('train-images-idx3-ubyte.gz', 'r') as f:\n f.read(16)\n\n buffer = f.read(hw * n)\n images = np.frombuffer(buffer, dtype=np.uint8)\n images = images.reshape(n, hw)\n\n return images", "def _load_image_pool(self):\n self.image_pool = [\n pg.image.load(self.image_folder + file_name).convert_alpha()\n for file_name in os.listdir(self.image_folder)\n ]", "def load_images(paths):\n return [cv2.imread(path, 0) for path in paths]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recreate the (compressed) image from the code book & labels
def recreate_image(codebook, labels, w, h): d = codebook.shape[1] image = np.zeros((w, h, d)) label_idx = 0 for i in range(w): for j in range(h): image[i][j] = codebook[labels[label_idx]] label_idx += 1 return image
[ "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[-1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n #print labels[label_idx], label_idx\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def setup_annotations(self):\n sbd_path = self.sbd_path\n target_path = pjoin(self.root, \"SegmentationClass/pre_encoded\")\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n path = pjoin(sbd_path, \"dataset/train.txt\")\n sbd_train_list = tuple(open(path, \"r\"))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files[\"train\"] + sbd_train_list\n\n pre_encoded = glob.glob(pjoin(target_path, \"*.png\"))\n\n #if len(pre_encoded) != 9733:\n if len(pre_encoded) != 8498:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, \"dataset/cls\", ii + \".mat\")\n data = scipy.io.loadmat(lbl_path)\n lbl = data[\"GTcls\"][0][\"Segmentation\"][0].astype(np.int32)\n lbl = toimage(lbl, high=lbl.max(), low=lbl.min())\n import imageio\n imageio.imwrite(pjoin(target_path, ii + \".png\"), lbl)", "def preprocess(self):\n timeStart = time.time()\n self.canvasClicked.addingCoco = False\n self.canvasClicked.addingNoncoco = False\n self.canvasClicked.deleting = False\n if not os.path.isfile(Parameters.codebookFileName):\n imgHeight = self.imgArray.shape[0]\n imgWidth = self.imgArray.shape[1]\n nrRandomSamples = Parameters.bovwCodebookNrRandomSamples\n randomPatchesArrayList = list()\n print \"Creating random samples for building the codebook ...\"\n randomPatchesCenterList = extractRandomPatchCenterFromListWithoutMask(nrRandomSamples, imgHeight, imgWidth)\n\n for randomPatchCenter in randomPatchesCenterList:\n centerX = randomPatchCenter[0]\n centerY = randomPatchCenter[1]\n\n tl_x = int(centerX - Parameters.samplePatchSize / 2)\n tl_y = int(centerY - Parameters.samplePatchSize / 2)\n\n br_x = tl_x + Parameters.samplePatchSize\n br_y = tl_y + Parameters.samplePatchSize\n\n # Replace with boundary when beyond\n tl_x = max(tl_x, 0)\n tl_y = max(tl_y, 0)\n br_x = min(br_x, self.imgArray.shape[1] - 1)\n br_y = min(br_y, self.imgArray.shape[0] - 1)\n\n randomPatchesArrayList.append(self.imgArray[tl_y: br_y + 1, tl_x: br_x + 1,:])\n\n timeRandomPatchForCodebook = time.time()\n print \"Random samples generated!\"\n print \"Generating codebook with {0} random samples, takes {1: .2f} seconds\".format(nrRandomSamples, timeRandomPatchForCodebook - timeStart)\n\n print \"Building the codebook ...\"\n self.codebook = extract_code_for_Images_List(randomPatchesArrayList)\n np.save(Parameters.codebookFileName, self.codebook)\n print \"Codebook built!\"\n\n\n else:\n self.codebook = np.load(Parameters.codebookFileName)\n print \"Codebook loaded!\"\n\n # if not os.path.exists(Parameters.testFeatures):\n self.extractProposalFeaturesForPrediction()\n # np.save(Parameters.testFeatures, self.bovwTestFeatures)\n timeEndPreprocessing = time.time()\n print \"The whole preprocessing takes {0: .2f} seconds!\".format(timeEndPreprocessing - timeStart)\n print \"Test features loaded!\"", "def transform_data(source, destination, format='bmp'):\n\n img_paths = sorted(glob(os.path.join(source, 'imagesTr/*.gz')))\n mask_paths = sorted(glob(os.path.join(source, 'labelsTr/*.gz')))\n\n if not os.path.exists(os.path.join(destination, 'images')):\n os.makedirs(os.path.join(destination, 'images'))\n\n if not os.path.exists(os.path.join(destination, 'masks')):\n os.makedirs(os.path.join(destination, 'masks'))\n\n for idx in range(len(img_paths)):\n img = nib.load(img_paths[idx]).get_fdata()\n mask = nib.load(mask_paths[idx]).get_fdata().astype(np.uint8)\n\n num_slices = img.shape[2]\n for slice_n in range(num_slices):\n img_slice = img[:, :, slice_n]\n img_slice = convert_range(img_slice, np.max(img_slice), np.min(img_slice))\n mask_slice = mask[:, :, slice_n]\n\n # save image and mask\n idx_str = get_correct_idx(idx, len(str(len(img_paths))))\n slice_n_str = get_correct_idx(slice_n, len(str(num_slices)))\n cv2.imwrite(os.path.join(destination, 'images', f'{idx_str}_{slice_n_str}.{format}'), img_slice)\n np.savez(os.path.join(destination, 'masks', f'{idx_str}_{slice_n_str}'), mask_slice)", "def crop_line2png(data_root, save_root):\n\n file_list = os.listdir(data_root)\n file_list = [file_name for file_name in file_list if file_name.rsplit('.', 1)[1] == 'jpg']\n print 'processing images number :%d' % len(file_list)\n s = set()\n counter = 0\n with open(os.path.join(save_root, 'label.txt'), 'w') as label_file:\n for i,img_name in enumerate(file_list):\n im = cv2.imread(os.path.join(data_root, img_name))\n if im is None: continue\n if np.min(im.shape[:2]) < 400 : continue\n gt_txt_path = os.path.join(cfg.data_root, img_name.rsplit('.', 1)[0] + '.txt')\n # gt_txt_path = os.path.join(cfg.gt_root, 'task2_' + img_name.rsplit('.', 1)[0] + '.txt')\n gts_line = parse_txt(gt_txt_path)\n gts_line = [line for line in gts_line if line.label != '###']\n for line in gts_line:\n # replace all non{char, alnum} to *\n line.label = re.sub(ur'[^\\u4e00-\\u9fa5a-zA-Z0-9]{1}', '*', line.label)\n gts_line = [line for line in gts_line if line.label.count('*') < len(line.label)]\n if gts_line is None or len(gts_line) == 0: continue\n # for convient, split Quad struct to (lines, lines_label)\n # line_parse = LineParser()\n # lines, lines_label = split_lines(gts_line)\n # lines = clip_boxes(lines, im.shape)\n # lines, lines_label = line_parse.filter_lines(lines, lines_label)\n\n if not os.path.exists(os.path.join(save_root, 'images')):\n os.makedirs(os.path.join(save_root, 'images'))\n for j,gt_line in enumerate(gts_line):\n rect = cv2.minAreaRect(gt_line.crds)\n box = cv2.cv.BoxPoints(rect)\n bbox = np.int0(box)\n if len(gt_line.label) == 1 and gt_line.label != '*':\n quad = bbox.reshape((4, 2))\n im_new = im\n elif np.linalg.norm(gt_line.crds[0] - gt_line.crds[1]) < np.linalg.norm(gt_line.crds[0] - gt_line.crds[3]) \\\n and (len(gt_line.label) >=2 or gt_line.label == '*'):\n continue\n else:\n im_copy = im.copy()\n poly = bbox.reshape((4, 2))\n # is vertical text\n p_lowest = np.argmax(poly[:, 1])\n p_lowest_right = (p_lowest - 1) % 4\n p_lowest_left = (p_lowest + 1) % 4\n if np.linalg.norm(poly[p_lowest] - poly[p_lowest_right]) > np.linalg.norm(\n poly[p_lowest] - poly[p_lowest_left]):\n start_pt = p_lowest\n end_pt = p_lowest_right\n else:\n start_pt = p_lowest_left\n end_pt = p_lowest\n try:\n angle = np.rad2deg(\n np.arctan((poly[start_pt][1] - poly[end_pt][1]) * 1.0 / (poly[start_pt][0] - poly[end_pt][0])))\n im_new = rotate_image(im_copy, angle)\n crds = list(bbox.reshape((-1)))\n quad = rotate_xml(im_copy, crds, angle)\n quad = quad.reshape((4, 2))\n except:\n continue\n x0 = np.min(quad[:, 0])\n y0 = np.min(quad[:, 1])\n x1 = np.max(quad[:, 0])\n y1 = np.max(quad[:, 1])\n # just for debug\n height = y1 - y0\n expand_ratio = 0.2\n\n new_image_name = '{0}_{1}.{2}'.format(img_name.rsplit('.', 1)[0], j, img_name.rsplit('.', 1)[1])\n s = s | set(gt_line.label)\n label_file.write('%s %s\\n' %(new_image_name, gt_line.label.encode('utf-8')))\n\n expand_y0 = int(max(0, int(y0) - expand_ratio * height))\n expand_y1 = int(min(im_new.shape[0], int(y1) + expand_ratio * height))\n expand_x0 = int(max(0, int(x0) - expand_ratio * height))\n expand_x1 = int(min(im_new.shape[1], int(x1) + expand_ratio * height))\n line_image = im_new[expand_y0: expand_y1, expand_x0: expand_x1]\n if cfg.DEBUG:\n print gt_line.label.encode('utf-8')\n cv2.imshow('', line_image)\n cv2.waitKey(0)\n if not os.path.exists(os.path.join(save_root, 'images')):\n os.mkdir(os.path.join(save_root, 'images'))\n cv2.imwrite(os.path.join(save_root, 'images', new_image_name), line_image)\n counter += 1\n if i % 1000 == 0: print 'proceeded %d' % i\n\n print 'gen line images : %d' %counter\n with open(os.path.join(save_root, 'map.txt'), 'w') as map_file:\n for (ind, label) in enumerate(s):\n map_file.write('%d %s\\n' % (ind, label.encode('utf-8')))\n label_file.close()\n map_file.close()", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def encode(self, img_path):\n self.image_path = img_path\n img_lab_fullres = self.load_image(img_path)\n img_gray = self.load_image(img_path, colorspace=\"gray\")\n ar_utils.save_img(self.output_path, ar_utils.gen_new_gray_filename(img_path), img_gray)\n\n if \"ideepcolor-px\" in self.method:\n filename_mask = ar_utils.gen_new_mask_filename(img_path)\n mask = None\n if self.method == \"ideepcolor-px-grid\":\n mask = self.get_color_mask_grid(img_path, self.grid_size, self.size, self.p)\n mask.save(self.output_path, os.path.basename(filename_mask), grid_size=self.grid_size)\n elif self.method == \"ideepcolor-px-selective\":\n mask = self.get_color_mask_selective(img_path)\n mask.save(self.output_path, os.path.basename(filename_mask))\n # \"ideepcolor-px-grid-exclude\"\n elif self.method == ar_utils.methods[4]:\n mask = self.get_color_mask_grid(img_path, self.grid_size, self.size, self.p, exclude=True)\n mask.save(self.output_path, os.path.basename(filename_mask))\n # \"ideepcolor-px-grid-selective\"\n elif self.method == ar_utils.methods[5]:\n # get two masks, one grid one selective, save both in Decoder combine both\n mask_grid = self.get_color_mask_grid(img_path)\n mask_grid.save(self.output_path, os.path.basename(filename_mask), name_extra=\"1\", grid_size=self.grid_size)\n\n mask_sel = self.get_color_mask_selective(img_path)#, sigma_gauss_div=225, sigma_bilat_div=250)\n mask_sel.save(self.output_path, os.path.basename(filename_mask), name_extra=\"2\")\n \n\n elif self.method == \"ideepcolor-global\":\n self.encode_ideepcolor_global(img_path, self.size)\n\n # ideepcolor-stock: no encoding necessary\n elif self.method == ar_utils.methods[3]:\n pass\n\n else:\n print(\"Error: method not valid:\", self.method)", "def encode_image(text_to_encode= 'SURPRISE!!', template_image=\"images/samoyed.jpg\"):\n encoded_image = Image.open(template_image)\n red_channel = encoded_image.split()[0]\n red_loaded = red_channel.load()\n\n image_text = write_text(\"SURPRISE!!\", encoded_image.size)\n pixels = image_text.load()\n image_text.save(\"images/image_text.png\")\n #encode_pixels= encode_image.load()\n\n x_size = image_text.size[0]\n y_size = image_text.size[1]\n\n for row in range(x_size):\n for col in range(y_size):\n red_pix = red_loaded[row,col]\n #checking to see if the pixel from word file is black\n if pixels[row,col] == (0,0,0):\n #if it is, and if the last bit of the red column is 1, then it will take its complement\n if red_pix & 1 == 1:\n new_pix[row,col]\n red_pix = red_pix & ~1\n # checks to see if the pixel is white\n elif pixels[row,col] == (255,255,255):\n #if the last bit of the red column is 0, then takes the complement\n if red_pix & 1 == 0:\n red_pix= red_pix | 1\n #saves everything and loads it\n red_loaded[row,col] = red_pix\n encoded_image.save(\"images/index.png\")", "def reconstruct(patches, output_name):\n image_patches = {}\n for im in patches:\n image = Image.open(im)\n image_file_basename = os.path.basename(im)\n image_patches[image_file_basename] = image\n patch_width = image.width\n patch_height = image.height\n\n if patch_height != patch_width:\n raise ValueError(\n \"Input sample is missing a patch or so. Height and width don't match\")\n sys.exit(-1)\n\n # calculate the new image width and height - should match original image\n img_size = math.sqrt(len(patches))\n new_h = int(patch_width*img_size)\n new_w = int(patch_width*img_size)\n\n print(\"Total Patches = {}\".format(len(patches)))\n print(\"Patch width, heigh = {}\".format(patch_width))\n print(\"Vertical, horizontal reconstruction strides = {}\".format(\n img_size))\n print(\"New image, (w,h) = ({},{})\".format(new_w, new_h))\n\n # Create place holder for new iamge\n result_image = Image.new('RGB', (new_w, new_h))\n\n cx = 0\n cy = 0\n\n patch_names = list(image_patches.keys())\n if FLAGS.random_shuffle_patches:\n random.shuffle(patch_names)\n\n for patch_name in patch_names:\n patch = image_patches[patch_name]\n print(\"Concat'ing patch \\'{}\\' at position ({},{})\".format(\n patch_name, cx, cy))\n result_image.paste(im=patch, box=(cx*patch_width, cy*patch_width))\n cx += 1\n\n if cx == img_size:\n cx = 0\n cy += 1\n\n print(\"Reconstructed sample, (w,h) = ({},{})\".format(\n result_image.width, result_image.height))\n print(\"Area = {}\".format(result_image.width*result_image.height))\n\n result_image.save(os.path.join(\n FLAGS.recon_output_dir, output_name + \".jpg\"))\n if FLAGS.show_sample:\n result_image.show()", "def recompress_image(image, label):\n image = tf.cast(image, tf.uint8)\n image = tf.image.encode_jpeg(image, optimize_size=True, chroma_downsampling=False)\n return image, label", "def decode(p):\n #assert p.endswith('.' + EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def create_image_caption_pairs(self):", "def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'", "def encode_decode(self, img, img_metas):\n pass", "def pack_raw(raw):\n\n im = np.maximum(raw - 512, 0) / (16383 - 512) # subtract the black level\n im = np.expand_dims(im, axis=2)\n img_shape = im.shape\n H = img_shape[0]\n W = img_shape[1]\n\n out = np.concatenate((im[0:H:2, 0:W:2, :],\n im[0:H:2, 1:W:2, :],\n im[1:H:2, 1:W:2, :],\n im[1:H:2, 0:W:2, :]), axis=2)\n return out", "def make_clnMod_fromImg(sd_map,int_map,pb_map='',tag='',clean_up=True):\n\n regridded_sd_map = sd_map +'.TMP.regrid'\n out_sd_map = sd_map +tag+'.regrid.jyPix'\n\n imregrid(imagename=sd_map,template=int_map,output=regridded_sd_map,overwrite=True)\n sd_header = imhead(regridded_sd_map)\n int_header = imhead(int_map)\n\n rad_to_arcsec = 206264.81\n twopi_over_eightLnTwo = 1.133\n\n flux_conversion = (rad_to_arcsec*np.abs(sd_header['incr'][0]))*(rad_to_arcsec*np.abs(sd_header['incr'][1])) / (twopi_over_eightLnTwo * sd_header['restoringbeam']['major']['value'] * sd_header['restoringbeam']['minor']['value'])\n\n print(\"====================\")\n print(\"THESE SHOULD BE ARCSEC:\"+sd_header['restoringbeam']['major']['unit']+\" \"+sd_header['restoringbeam']['minor']['unit'])\n print(\"THESE SHOULD BE RADIANS: \"+sd_header['axisunits'][0]+\" \"+sd_header['axisunits'][1])\n print(\"if not the unit conversions were wrong....\")\n print(\"====================\")\n\n flux_string = \"(IM0 * %f)\" % flux_conversion\n\n rmtables(out_sd_map)\n immath(imagename=regridded_sd_map,expr=flux_string,outfile=out_sd_map,mode='evalexpr')\n new_unit = 'Jy/pixel'\n imhead(imagename=out_sd_map,mode='put',hdkey='BUNIT',hdvalue=new_unit)\n\n if (len(pb_map) > 0):\n rmtables('placeholder.im')\n try:\n immath(imagename=[out_sd_map],mode='evalexpr',expr='IM0*1.0',outfile='placeholder.im')\n except:\n print(\" Problem creating placeholder table \")\n else:\n rmtables(out_sd_map)\n immath(imagename=['placeholder.im',pb_map],expr='IM0*IM1',outfile=out_sd_map)\n finally:\n rmtables('placeholder.im')\n\n # clean up after self-\n rmtables(regridded_sd_map)\n\n return out_sd_map", "def generator(self):\n rand_index = np.arange(len(self.image_files))\n if self.shuffle:\n np.random.shuffle(rand_index)\n for index in rand_index:\n image_filename = self.image_files[index]\n # 训练数据\n image_path = self.image_root + image_filename\n if self.check_mask_name():\n mask_path = self.mask_root + image_filename\n else:\n mask_path = self.mask_root + image_filename.split(\".\")[0] + \"_label\" + self.extension\n\n \" Generate label from image filename. Shall be revised according to specific situation\"\n if image_path.split('/')[-1].split('_')[0] == 'n':\n label = np.array([0.0])\n else:\n label = np.array([1.0])\n\n image, mask = self.read_data(image_path, mask_path)\n\n image = image / 255.0\n mask = mask // 255\n\n if self.mode == \"train_segmentation\" or self.mode == \"train_decision\":\n aug_random = np.random.uniform()\n if aug_random > 0.9:\n image, mask = self.augmentor.transform_seg(image, mask)\n # # adjust_gamma\n # if np.random.uniform() > 0.7 and \"adjust_gamma\" in self.augmentation:\n # expo = np.random.choice([0.7, 0.8, 0.9, 1.1, 1.2, 1.3])\n # image = exposure.adjust_gamma(image, expo)\n #\n # # flip\n # if np.random.uniform() > 0.7 and \"flip\" in self.augmentation:\n # aug_seed = np.random.randint(-1, 2)\n # image = cv2.flip(image, aug_seed)\n # mask = cv2.flip(mask, aug_seed)\n #\n # # rotate\n # if np.random.uniform() > 0.7 and \"rotate\" in self.augmentation:\n # angle = np.random.randint(-5, 5)\n # image = self.rotate(image, angle)\n # mask = self.rotate(mask, angle)\n #\n # # GassianBlur\n # if np.random.uniform() > 0.7 and \"GaussianBlur\" in self.augmentation:\n # image = cv2.GaussianBlur(image, (5, 5), 0)\n #\n # # shift\n # if np.random.uniform() > 0.7 and \"shift\" in self.augmentation:\n # dx = np.random.randint(-5, 5) # width*5%\n # dy = np.random.randint(-5, 5) # Height*10%\n # rows, cols = image.shape[:2]\n # M = np.float32([[1, 0, dx], [0, 1, dy]]) # (x,y) -> (dx,dy)\n # image = cv2.warpAffine(image, M, (cols, rows))\n # mask = cv2.warpAffine(mask, M, (cols, rows))\n\n if len(image.shape) == 2:\n image = (np.array(image[:, :, np.newaxis]))\n if len(mask.shape) == 2:\n mask = (np.array(mask[:, :, np.newaxis]))\n\n yield image, mask, label, image_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
linearly scale the values of an array in the range [0, 1]
def scale01(arr): walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling return walk_arr_01 #return the scaled array
[ "def linear_rescale(min, max, value):\n x = (value - min)/(max - min)\n return x", "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def rescale(a, range=[0,1]):\n\tmi, ma = np.min(a), np.max(a)\n\treturn (a-mi)/(ma-mi)*(range[1]-range[0])+range[0]", "def scaling(a):\n min_a, max_a = np.min(a), np.max(a)\n return (a + min_a) / (max_a - min_a)", "def scale_array(data, val=100):\n max_data = np.max(data)\n scale = float(val) / max_data\n return np.multiply(data, scale)", "def rescale(array):\n mn = array.min()\n mx = array.max()\n array_ = array.copy()\n array_ -= mn\n array_ /= (mx-mn)\n return array_", "def rescale(old_array, min_, max_):\n scale_factor = (max_ - min_) / (old_array.max() - old_array.min())\n return min_ + scale_factor * (old_array - old_array.min())", "def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def _scale(x, lb, ub):\n return (x - lb) / (ub - lb)", "def lin_scale(self):\n self.axes.set_yscale('linear')\n self.update_graph()", "def scale(a, tmin=0.0, tmax=1.0):\n return np.interp(a, (a.min(), a.max()), (tmin, tmax))", "def lin_scale(self):\n self.axes.set_yscale('linear')\n self.update_graph()", "def minmax_scale(input_arr):\n min_val = np.min(input_arr)\n max_val = np.max(input_arr)\n\n output_arr = (input_arr - min_val) * 255.0 / (max_val - min_val)\n\n return output_arr", "def scale_linear(image, **kwargs):\n\n result = rescale_img(image, **kwargs)\n return result[0]", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def array_normalisation(self, array,new_min=0.0,new_max=1.0):\n\n array = array.astype(float)\n\n old_min = np.amin(array)\n old_max = np.amax(array)\n\n array = new_min + (array - old_min) * (new_max - new_min) / (old_max - old_min)\n\n return array", "def scale(self, factor):\n self.ys *= factor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extends the init_buffer of OffsetColorProgram class by creating the additional carry flag VBO
def _init_buffers(self, v, n, _): super()._init_buffers(v, n, _) self.vbos.append(gl.glGenBuffers(1)) # init VBO 2 - dynamic color data gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3]) loc = self.get_attribute_location("carried") gl.glEnableVertexAttribArray(loc) gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0)) gl.glVertexAttribDivisor(loc, 1) gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)
[ "def set_buffer(self, shader, offset):\n Buffer.bind_vbo(self.vertices)\n Buffer.bind_ebo(self.indexes)\n Buffer.get_attribute_location(shader, \"position\")\n Buffer.vertex_attribute(6, 0, 0)\n Buffer.get_attribute_location(shader, \"aNormal\")\n Buffer.vertex_attribute(6, offset, 1)", "def __init__(self, label, program, primitive_type=GL_LINES, linewidth=1,\n origin=(0, 0, 0), scale=1, filled=False, vertexcount_max=0):\n\n # generate attribute state label aka VAO\n self.vao = glGenVertexArrays(1)\n\n # generate data buffer labels aka VBO\n self.vbo_array = glGenBuffers(1) # this buffer labels positions+colors\n self.vbo_element_array = glGenBuffers(1) # VertexBuffer ID for indices\n\n self.program = program\n self.label = label\n\n self.vertexcount_max = vertexcount_max # maximum number of vertices\n self.vertexcount = 0 # current number of appended/used vertices\n\n self.primitive_type = primitive_type\n self.linewidth = linewidth\n self.filled = filled # if a triangle should be drawn filled\n\n # billboard mode\n self.billboard = False # set to True to always face camera\n self.billboard_axis = None # must be strings \"X\", \"Y\", or \"Z\"\n\n self.scale = scale # 1 local unit corresponds to scale world units\n\n # by default congruent with world origin\n self.origin = QVector3D(*origin)\n self.origin_tuple = origin\n\n # by default not rotated\n self.rotation_angle = 0\n self.rotation_vector = QVector3D(0, 1, 0) # default rotation around Y\n\n self.dirty = True\n\n self.uniforms = {}\n\n # TODO: Support not only for attributes \"color\" and \"position\", but\n # arbitrary formats.\n supported_vertex_format = [\n (\"position\", np.float32, 3),\n (\"color\", np.float32, 4)\n ]\n self.vdata_pos_col = np.zeros(\n self.vertexcount_max, supported_vertex_format)\n\n if \"vdata_indices\" not in list(vars(self).keys()):\n self.vdata_indices = None", "def _create_vertex_buffer(self):\n self._vertices = np.array([\n -1.0, -1.0, 0.5773, 0.0, 0.0,\n 0.0, -1.0, -1.15475, 0.5, 0.0,\n 1.0, -1.0, 0.5773, 1.0, 0.0,\n 0.0, 1.0, 0.0, 0.5, 1.0\n ], dtype=np.float32)\n\n self._vao = glGenVertexArrays(1)\n glBindVertexArray(self._vao)\n self._vbo = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self._vbo)\n glBufferData(GL_ARRAY_BUFFER, self._vertices.nbytes, self._vertices, GL_STATIC_DRAW)", "def _init_plot_buffer(self, configuration):\n if not isinstance(configuration, dict):\n configuration = { 'length': configuration }\n\n # initialize vao/vbo\n vao, vbo = util.VAO(), util.VBO()\n\n # put kernel function into vertex shader\n vertex_shader_kernel = open(SHADER_DIR+'/data.vert.glsl').read()\n if configuration['kernel'] is not None:\n vertex_shader_kernel = vertex_shader_kernel.replace(\n self.KERNEL_PLACEHOLDER,\n configuration['kernel'])\n\n shader = util.Shader(\n vertex=vertex_shader_kernel,\n geometry=open(SHADER_DIR+'/data.geom.glsl').read(),\n fragment=open(SHADER_DIR+'/data.frag.glsl').read(),\n link=True\n )\n norm = configuration.get('norm', float)\n buffer_configuration = {\n 'byte_count': configuration['length'] * 4,\n 'vertex_count': configuration['length']/2,\n 'point_base_color': configuration.get('point_base_color', [0,0,0.5,1]),\n 'point_size': configuration.get('point_size', norm(2.0/configuration['length'])),\n 'vao': vao,\n 'vbo': vbo,\n 'shader': shader\n }\n\n # uniforms\n shader.uniform('mat_plane', self._mat_plot)\n shader.uniform('geometry_color', buffer_configuration['point_base_color'])\n shader.uniform('dot_size', buffer_configuration['point_size'])\n\n # configure vbo\n with vbo.get(0):\n vertex_position = shader.attributeLocation('vertex_position')\n glBufferData(GL_ARRAY_BUFFER, buffer_configuration['byte_count'], None, GL_STATIC_DRAW)\n with vao:\n glVertexAttribPointer(vertex_position, 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n return buffer_configuration", "def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):\n self.gl_objects = gl_buffers\n self.color_vbo, self.pos_vbo = color_vbo, pos_vbo\n self.partNumber = partNumber", "def prepareUniformBuffers(self):\n # Vertex shader uniform buffer block\n uboVSSize = sum([glm.sizeof(ubo) for ubo in self.uboVS.values()])\n bufferInfo = vk.VkBufferCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n size = uboVSSize,\n # This buffer will be used as a uniform buffer\n usage = vk.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT\n )\n # Create a new buffer\n self.uniformBufferVS['buffer'] = vk.vkCreateBuffer(self.device, bufferInfo, None)\n # Get memory requirements including size, alignment and memory type\n memReqs = vk.vkGetBufferMemoryRequirements(self.device, self.uniformBufferVS['buffer'])\n # Get the memory type index that supports host visibile memory access\n # Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial\n # We also want the buffer to be host coherent so we don't have to flush (or sync after every update.\n #Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base\n allocInfo = vk.VkMemoryAllocateInfo(\n sType = vk.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n pNext = None,\n allocationSize = memReqs.size,\n memoryTypeIndex = self.vulkanDevice.getMemoryType(memReqs.memoryTypeBits, vk.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | vk.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n )\n # Allocate memory for the uniform buffer\n self.uniformBufferVS['memory'] = vk.vkAllocateMemory(self.device, allocInfo, None)\n # Bind memory to buffer\n vk.vkBindBufferMemory(self.device, self.uniformBufferVS['buffer'], self.uniformBufferVS['memory'], 0)\n # Store information in the uniform's descriptor that is used by the descriptor set\n self.uniformBufferVS['descriptor'] = vk.VkDescriptorBufferInfo(\n buffer = self.uniformBufferVS['buffer'],\n offset = 0,\n range = uboVSSize\n )\n\n self.updateUniformBuffers()", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- ---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def buildCommandBuffers(self):\n cmdBufInfo = vk.VkCommandBufferBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n pNext = None\n )\n # Set clear values for all framebuffer attachments with loadOp set to clear\n # We use two attachments (color and depth) that are cleared at the start of the subpass and as such we need to set clear values for both\n clearValues = []\n clearValue = vk.VkClearValue(\n color = [[ 0.0, 0.0, 0.2, 1.0 ]]\n )\n clearValues.append(clearValue)\n clearValue = vk.VkClearValue(\n depthStencil = [1.0, 0 ]\n )\n clearValues.append(clearValue)\n offset = vk.VkOffset2D(x = 0, y = 0)\n extent = vk.VkExtent2D(width = self.width, height = self.height)\n renderArea = vk.VkRect2D(offset = offset, extent = extent)\n for i in range(len(self.drawCmdBuffers)):\n renderPassBeginInfo = vk.VkRenderPassBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,\n pNext = None,\n renderPass = self.renderPass,\n renderArea = renderArea,\n clearValueCount = 2,\n pClearValues = clearValues,\n # Set target frame buffer\n framebuffer = self.frameBuffers[i]\n )\n # wait this buffer to be released\n #vk.vkWaitForFences(self.device, 1, [self.waitFences[i]], vk.VK_TRUE, vk.UINT64_MAX)\n # rebuild this buffer\n vk.vkBeginCommandBuffer(self.drawCmdBuffers[i], cmdBufInfo)\n # Start the first sub pass specified in our default render pass setup by the base class\n # This will clear the color and depth attachment\n vk.vkCmdBeginRenderPass(self.drawCmdBuffers[i], renderPassBeginInfo, vk.VK_SUBPASS_CONTENTS_INLINE)\n # Update dynamic viewport state\n viewport = vk.VkViewport(\n height = float(self.height),\n width = float(self.width),\n minDepth = 0.0,\n maxDepth = 1.0\n )\n vk.vkCmdSetViewport(self.drawCmdBuffers[i], 0, 1, [viewport])\n # Update dynamic scissor state\n offsetscissor = vk.VkOffset2D(x = 0, y = 0)\n extentscissor = vk.VkExtent2D(width = self.width, height = self.height)\n scissor = vk.VkRect2D(offset = offsetscissor, extent = extentscissor)\n vk.vkCmdSetScissor(self.drawCmdBuffers[i], 0, 1, [scissor])\n\n # Bind descriptor sets describing shader binding points\n vk.vkCmdBindDescriptorSets(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipelineLayout, 0, 1, [self.descriptorSet], 0, None)\n # Bind the rendering pipeline\n # The pipeline (state object) contains all states of the rendering pipeline, binding it will set all the states specified at pipeline creation time\n vk.vkCmdBindPipeline(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipeline);\n # Bind triangle vertex buffer (contains position and colors)\n offsets = [ 0 ]\n vk.vkCmdBindVertexBuffers(self.drawCmdBuffers[i], 0, 1, [self.vertices['buffer']], offsets)\n # Bind triangle index buffer\n vk.vkCmdBindIndexBuffer(self.drawCmdBuffers[i], self.indices['buffer'], 0, vk.VK_INDEX_TYPE_UINT32)\n # Draw indexed triangle\n vk.vkCmdDrawIndexed(self.drawCmdBuffers[i], self.indices['count'], 1, 0, 0, 1)\n # uncomment for imgui support\n self.drawUI(self.drawCmdBuffers[i])\n vk.vkCmdEndRenderPass(self.drawCmdBuffers[i])\n # Ending the render pass will add an implicit barrier transitioning the frame buffer color attachment to\n # VK_IMAGE_LAYOUT_PRESENT_SRC_KHR for presenting it to the windowing system\n vk.vkEndCommandBuffer(self.drawCmdBuffers[i])", "def _initialize_buffers(self) -> None:", "def CreateTextureBuffer(self, p_int, p_int_1, p_int_2, vtkOpenGLBufferObject):\n ...", "def _build_bufferview(buffer, target, byte_length, byte_offset, byte_stride):\n new_buffer_view = {\n \"buffer\": buffer,\n \"byteLength\": byte_length,\n \"byteOffset\": byte_offset\n }\n\n properties_keys = [\"target\", \"byteStride\"]\n properties_values = [target, byte_stride]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_buffer_view[key] = target\n\n return new_buffer_view", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load buffer per vertex attribute (in list with index = shader layout)\n for loc, data in enumerate(attributes):\n if data is not None:\n # bind a new vbo, upload its data to GPU, declare size and type\n self.buffers.append(GL.glGenBuffers(1))\n data = np.array(data, np.float32, copy=False) # ensure format\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False) # good format\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load a buffer per initialized vertex attribute (=dictionary)\n for loc, data in enumerate(attributes):\n if data is None:\n continue\n\n # bind a new vbo, upload its data to GPU, declare its size and type\n self.buffers += [GL.glGenBuffers(1)]\n data = np.array(data, np.float32, copy=False)\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc) # activates for current vao only\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)\n\n # cleanup and unbind so no accidental subsequent state update\n GL.glBindVertexArray(0)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)", "def prepare_attrib_mapping(self, primitive):\n buffer_info = []\n for name, accessor in primitive.attributes.items():\n info = VBOInfo(*accessor.info())\n info.attributes.append((name, info.components))\n\n if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:\n if buffer_info[-1].interleaves(info):\n buffer_info[-1].merge(info)\n continue\n\n buffer_info.append(info)\n\n return buffer_info", "def initialize(self, context):\n\n wm = context.window_manager\n\n self.__timer = wm.event_timer_add(0.15, context.window)\n self.__active_object_name = context.active_object.name\n self.__active_object_mode = context.active_object.mode\n\n VertexColorTools.VertexColoringEdit.__static_is_active = True\n\n wm.modal_handler_add(self)\n\n # now ensure all needed vertex color layers are initialized with proper colors\n mesh = self.__get_active_object__().data\n for layer_name in _VCT_consts.ColoringLayersTypes.as_list():\n\n if layer_name in mesh.vertex_colors:\n continue\n\n vcolor = mesh.vertex_colors.new(name=layer_name)\n\n buffer = None\n if layer_name == _VCT_consts.ColoringLayersTypes.Color:\n buffer = numpy.array([0.5] * (len(mesh.loops) * 3))\n elif layer_name == _VCT_consts.ColoringLayersTypes.Decal:\n buffer = numpy.array([1.0] * (len(mesh.loops) * 3))\n elif layer_name == _VCT_consts.ColoringLayersTypes.AO:\n buffer = numpy.array([0.5] * (len(mesh.loops) * 3))\n elif layer_name == _VCT_consts.ColoringLayersTypes.AO2:\n buffer = numpy.array([0.5] * (len(mesh.loops) * 3))\n\n if buffer is not None:\n vcolor.data.foreach_set(\"color\", buffer)\n\n # initialize buffers and hash\n self.__vcolors_buffer_arrays = [\n numpy.array([0.0] * (len(mesh.loops) * 3)),\n numpy.array([0.0] * (len(mesh.loops) * 3)),\n numpy.array([0.0] * (len(mesh.loops) * 3)),\n numpy.array([0.0] * (len(mesh.loops) * 3))\n ]\n\n self.__old_vcolors_array_hash = None\n\n return {'RUNNING_MODAL'}", "def _create_bufferview(self, name, buffer, byte_length, byte_offset, byte_stride, target=None):\n new_buffer_view = self._build_bufferview(buffer=self._resolve_mapping(inp=buffer, mapping=self.buffers_map),\n target=target,\n byte_length=byte_length,\n byte_offset=byte_offset,\n byte_stride=byte_stride)\n\n self.bufferViews.append(new_buffer_view)\n\n if name:\n self.bufferViews_map[name] = self._last_index(self.bufferViews)\n\n return self._last_index(self.bufferViews)", "def addBufferChip(feature):\r\n return feature.buffer(1500)", "def pc_output_buffers_full(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
updates the carry flag data (VBO3)
def update_carried(self, data): self.use() gpu_data = np.array(data, dtype=np.float32) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3]) gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)
[ "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)", "def BVC(self, value):\n if not self.reg.V:\n self.reg.PC += value", "def SCClr3Bits(self):\n cadena = GetTexto(self.choiceParametro1) + \" = 0\"\n cadena += \"\\n\" + GetTexto(self.choiceParametro2) + \" = 0\"\n cadena += \"\\n\" + GetTexto(self.choiceGuardar) + \" = 0\"\n self.txtctrlSeudo.SetValue(cadena)\n self.ValorBloque = 0\n self.ValorBloque = ((Bloque_Clr3Bit<<24)|\\\n int(self.choiceParametro1.GetCurrentSelection())|\\\n int(self.choiceParametro2.GetCurrentSelection()<<8)|\\\n int(self.choiceGuardar.GetCurrentSelection()<<16))\n self.padre.Estado.Bloques[self.numero] = self.ValorBloque\n return cadena", "def buffer_flip(self):\n self._program += super().buffer_flip()\n self._check_pc()\n return", "def sync(self):\n with self:\n GL.glBufferSubData(self._target, 0, self._data.nbytes, self._data)", "def _update_bit_features(self):\n index = 1 if self.is_info_v2 else 0\n for feature, keys in BIT_FEATURES.items():\n status = self.lookup_bit(keys[index])\n self._update_feature(feature, status, False)", "def update(self): \n #TODO: CHECK currect state change.\n # print \"ROB update called\"\n for CDB_elem in self.CDB:\n index = CDB_elem[1] - 1\n if (self.buffer[index]['Busy'] == True and self.buffer[index]['Ready'] == False):\n self.buffer[index]['Ready'] = True\n self.buffer[index]['Value'] = CDB_elem[0]\n # print 'Setting ROB_index ', CDB_elem[1], 'with value ' , self.buffer[index]['Value']", "def addBufferChip(feature):\r\n return feature.buffer(1500)", "def BVS(self, value):\n if self.reg.V:\n self.reg.PC += value", "def carry_flag_16(val):\r\n if (val & 0x10000) != 0x00:\r\n set_flag(F_C)\r\n else:\r\n unset_flag(F_C)", "def carry_flag(val):\r\n if (val & 0x100) != 0x00:\r\n set_flag(F_C)\r\n else:\r\n unset_flag(F_C)", "def update_Vbbp0(self, nbaths, velph, mtype=complex):\n si = self.si\n si.nbaths = nbaths\n self.velph = make_velph_dict(velph, si)\n self.mtype = mtype\n self.Vbbp0 = elph_construct_Vbbp(self, velph)", "def banr(self, a, b, c):\n\n self.registers[c] = self.registers[a] & self.registers[b]", "def update(self):\n self.send_data(self.video_buffer)", "def _update_baf(self):\n # accounting for empty blocks\n self.baf = np.sum(np.multiply(self.array, (self.array == 1))) / (self.gpc_totalblocks * self.num_of_stripes)", "def updatePacketBuffer(self,data):\n \n with self.packetBufferLock:\n if (data): self.packetBuffer.append(data)", "def ggml_add1_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData:\n ...", "def update_crbms(self):\n \n for l in range(self.L):\n if l==0: # first CRBM\n a=self.a\n b=self.b[l]\n c=self.c[l]\n W=self.W[l]\n self.crbms[l].set_param(a,b,c,W)\n elif l==self.L-1 and self.L>1: # last CRBM\n a=self.b[l-1]\n b=self.b[l]\n d=self.c[l-1]\n c=self.c[l]\n W=self.W[l]\n self.crbms[l].set_param(a,b,c,d,W)\n else: # CRBMs in the middle\n a=self.b[l-1]\n b=self.b[l]\n d=self.c[l-1]\n c=self.c[l]\n W=2*self.W[l]\n self.crbms[l].set_param(a,b,c,d,W)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets scale control bitword = 0 x, y frozen scales + 1 x is interactive + 2 y is interactive bit value 0/1 frozen/interactive
def set_scale_control(self, scale_ctl=3): self._scale_ctl = scale_ctl
[ "def setScale(self, x, y):\r\n self.scale(x,y)", "def set_scale(self, scale=2):\n\n CTRL_REG4 = self.single_access_read(0x23)\n\n scaleBits = 0b00 # default value\n self.scale = 2\n\n if scale == 4:\n scaleBits = 0b01\n self.scale = 4\n elif scale == 8:\n scaleBits = 0b10\n self.scale = 8\n elif scale == 16:\n scaleBits = 0b11\n self.scale = 16\n\n CTRL_REG4 = CTRL_REG4 & 0b11001111\n CTRL_REG4 = CTRL_REG4 | (scaleBits<<4)\n\n #print (bin(CTRL_REG4)) # for testing\n\n self.single_access_write(0x23, CTRL_REG4)\n\n return", "def scale_mode():\r\n pass", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def set_scale(self, min, max, inc):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.rays.scale\", min, \r\n max, inc)\r\n p2e._app.Exec(arg_str)", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def _style_scale(self):\n active_color = Colors.update_hsv(self.colors.primary, vd=-0.2)\n self._set_option(\"*Scale.background\", self.colors.primary)\n self._set_option(\"*Scale.showValue\", False)\n self._set_option(\"*Scale.sliderRelief\", tk.FLAT)\n self._set_option(\"*Scale.borderWidth\", 0)\n self._set_option(\"*Scale.activeBackground\", active_color)\n self._set_option(\"*Scale.highlightThickness\", 1)\n self._set_option(\"*Scale.highlightColor\", self.colors.border)\n self._set_option(\"*Scale.highlightBackground\", self.colors.border)\n self._set_option(\"*Scale.troughColor\", self.colors.inputbg)", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def change_pos_scale(self):\n self.pos = self.base_pos * self.zoom\n self.rect = self.image.get_rect(center=self.pos)", "def scale(self):", "def _firstScaleCommand(self, val):\n self._widget['command'] = self._scaleCommand", "def cli_scale(ctx):\n pass", "def RatingScale(self):\r\n\t\tself.timer.reset()\r\n\t\tself.respKey = []\r\n\t\tself.choice = []\r\n\t\tself.final_choice = []\r\n\t\tself.keyList = self.respKeys + self.acceptKey\r\n\t\tself.hit_accept = False\r\n\t\tself.y = -0.2 # labels y position\r\n\r\n\t\tself._initScaleTitle()\r\n\t\tself._initExtraText()\r\n\t\tself._initScaleInstruct()\r\n\t\tself._initLine(start = (self.xLeft, -0.05), end = (self.xRight,-0.05))\r\n\t\tself._initScaleMarker(fillColor= self.markerColor)\r\n\t\tself._initTickMarks()\r\n\t\tself._initlabelsList()\r\n\t\tself._initlabelsText()\r\n\t\tself._initChoiceText()\r\n\t\tself._initAcceptText()\r\n\r\n\t\tself.scale_marker.setPos((self.xMid,0), log=None)\r\n\r\n\t\tself.scale_title.draw()\r\n\t\tself.scale_extra_text.draw()\r\n\t\tself.scale_instr_text.draw()\r\n\t\tself.scale_line.draw()\r\n\t\tself.scale_marker.draw()\r\n\t\tself.scale_labels_text.draw()\r\n\r\n\t\tfor label in self.labelsList:\r\n\t\t\tself.scale_labels.setText(label)\r\n\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\tself.scale_labels.draw()\r\n\r\n\t\tfor tick in self.tickList:\r\n\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\tself.tick_marks.draw()\r\n\r\n\t\tself.win.flip()\r\n\r\n\t\twhile self.hit_accept == False or self.choice == []:\r\n\t\t\tself.respKey = event.getKeys(keyList = self.keyList)\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.respKeys):\r\n\t\t\t\tself.keyIndex = self.respKeys.index(self.respKey[-1])\r\n\t\t\t\tself.xPos = self.tickList[self.keyIndex]\r\n\t\t\t\tself.choice = self.tickNumber[self.keyIndex]\r\n\t\t\t\tself.choice_text.setText(self.choiceText + str(self.choice))\r\n\t\t\t\tself.scale_title.draw()\r\n\t\t\t\tself.scale_extra_text.draw()\r\n\t\t\t\tself.scale_instr_text.draw()\r\n\t\t\t\tself.scale_line.draw()\r\n\t\t\t\tself.scale_marker.setPos((self.xPos,0), log=None)\r\n\t\t\t\tself.scale_marker.draw()\r\n\t\t\t\tself.scale_labels_text.draw()\r\n\t\t\t\tself.choice_text.draw()\r\n\t\t\t\tself.accept_text.draw()\r\n\t\t\t\tfor label in self.labelsList:\r\n\t\t\t\t\tself.scale_labels.setText(label)\r\n\t\t\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\t\t\tself.scale_labels.draw()\r\n\t\t\t\tfor tick in self.tickList:\r\n\t\t\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\t\t\tself.tick_marks.draw()\r\n\t\t\t\tself.win.flip()\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.acceptKey):\r\n\t\t\t\tif self.choice != []:\r\n\t\t\t\t\tself.hit_accept = True\r\n\t\t\ttime.sleep(0.2)\r\n\t\tself.response_time = round(self.timer.getTime(),2)\r\n\t\tself.win.setUnits(self.savedUnits, log=None)\r\n\t\tevent.clearEvents('keyboard')\r\n\t\treturn(self.choice, self.response_time)", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def change_scale(self):\n\n if self.pixmap is None:\n return\n self.scale = (self.ui.horizontalSlider.value() + 1) / 100\n height = int(self.scale * self.pixmap.height())\n pixmap = self.pixmap.scaledToHeight(height, QtCore.Qt.TransformationMode.FastTransformation)\n pixmap_item = QtWidgets.QGraphicsPixmapItem(pixmap)\n pixmap_item.setPos(0, 0)\n self.scene.clear()\n self.scene.addItem(pixmap_item)\n self.draw_coded_area()\n msg = _(\"Key + or W zoom in. Key - or Q zoom out\") + \"\\n\"\n msg += _(\"Scale: \") + str(int(self.scale * 100)) + \"%\"\n self.ui.horizontalSlider.setToolTip(msg)", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def set_scale(self, fac):\n self.scale = fac", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get versions of EFI, Boot ROM, OS & Mac Device as well as the SysUUID
def gather_system_versions(self): # Get Mac model ID self.hw_version = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "model", None, 0)).replace( "\x00", "") if "imacpro" in self.hw_version.lower(): # iMac Pro stores it's EFI data different due it's new architecture # so grab the EFI & SMC ROM versions appropriately raw_efi_list = [] raw_rom_info = str( IORegistryEntryCreateCFProperty( IORegistryEntryFromPath( 0, "IODeviceTree:/rom"), "apple-rom-info", None, 0)) for data in raw_rom_info.split("\n"): if data.strip().startswith("BIOS ID"): raw_efi_list = data.split(":")[1].strip().split(".") break else: self.message( "[-] Could not find raw EFI data to determine EFI versions. Exiting....") return False self.efi_version = "%s.%s.%s" % ( raw_efi_list[0], raw_efi_list[2], raw_efi_list[3]) # Can't currently find the SMC version like this on imac pros .... # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching("AppleSMC")), "smc-version", None, 0)) self.smc_version = "" else: # EFI & SMC ROM versions self.smc_version = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("AppleSMC")), "smc-version", None, 0)) raw_efi = str( IORegistryEntryCreateCFProperty( IORegistryEntryFromPath( 0, "IODeviceTree:/rom"), "version", None, 0)).replace( "\x00", "").split(".") self.efi_version = "%s.%s.%s" % ( raw_efi[0], raw_efi[2], raw_efi[3]) # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's # value, but we do want it to be unique however. The Salt value is # never submitted to the API salt = hex(getnode()) sys_uuid = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "IOPlatformUUID", None, 0)).replace( "\x00", "") self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest() # Get the Board-ID, this is how EFI files are matched to running # hardware - Nastee self.board_id = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "board-id", None, 0)).replace( "\x00", "") # Get OS version self.os_version = commands.getoutput("sw_vers -productVersion") # Get build number self.build_num = commands.getoutput("sw_vers -buildVersion") # Carve out the major version as we use this a bunch # self.os_maj_ver = ".".join(self.os_version.split(".")[:2]) # Add gathered info to the dictionary to query the API with self.endpoints_to_check["127.0.0.1"] = { "hashed_uuid": self.h_sys_uuid, "hw_ver": self.hw_version, "rom_ver": self.efi_version, "smc_ver": self.smc_version, "board_id": self.board_id, "os_ver": self.os_version, "build_num": self.build_num} return True
[ "def getOSinfo_all():\n\tagentPlatForm = platform.system()\n\treturn agentPlatForm\n\t# print agentPlatForm\n\t# command_systeminfo = 'uname -a'\n\t# command_HDD_linux = 'df -h'\n\t# #get\n\t# systeminfo = os.system(command_systeminfo)\n\t# IPlist = socket.gethostbyname(socket.gethostname())\n\t# # HDD_linux = os.popen(command_HDD_linux)\n\t# isx86_type = True\n\t# isvware_tpye = True\n\t# machine_tpye = '' #machine tpye include IBM_P IBM_I HUAWEI_X\n\t# machine_model = '' # machine version IBM P710 e.t.\n\t# machine_serialNum = ''\n\t# # p = subprocess.Popen(command,\n\t# # \t\t\t\t\tstdin = subprocess.PIPE,\n\t# # \t\t\t\t\tstdout = subprocess.PIPE,\n\t# # \t\t\t\t\tstderr = subprocess.STDOUT,\n\t# # \t\t\t\t\tshell = False,\n\t# # \t\t\t\t\tclose_fds = sys.platform.startswith('win'),\n\t# # \t\t\t\t\tuniversal_newlines = True,\n\t# # \t\t\t\t\tenv = os.environ)\n\t# return systeminfo, HDD_linux, IPlist", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions", "def OS_scan(self):\n commands = [\"lsb_release -a\", \"sw_vers\", \"systeminfo\"] #[linux, mac, windows]\n scan_results = {}\n for command in commands:\n try:\n process = os.popen(command)\n scan_output = str(process.read())\n scan_results[command] = scan_output\n except:\n scan_results[\"Error\"] = \"Unable to detect OS version\"\n\n for key, value in scan_results.items():\n if scan_results[key]:\n return key, scan_results[key].split()\n return \"Error\", scan_results[\"Error\"]", "def get_kernel_versions(self):\n versions = []\n\n with open(self.path) as grub_file:\n for line in grub_file:\n match = re.search(r'kernel /boot/vmlinuz-([\\d.]*)', line)\n if match:\n version = match.group(1).strip()\n if version:\n versions.append(version)\n\n return versions", "def os_vers(self):\n import lsb_release\n try:\n from debian import debian_support\n except ImportError:\n from debian_bundle import debian_support\n\n target_os_ver = None\n our_os_ver = debian_support.Version(\n lsb_release.get_distro_information()['RELEASE'])\n\n if os.path.exists(os.path.join(self.target, '.disk', 'info')):\n with open(os.path.join(self.target, '.disk', 'info'),'r') as f:\n contents = f.readline().split()\n if len(contents) > 2:\n # Consider point releases the same as the initial release\n # (10.04.4 -> 10.04)\n target_os_ver = debian_support.Version(contents[1])\n\n return target_os_ver, our_os_ver", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "def os_info():\n\n return(sys.version, sys.platform)", "def get_zhinst_firmware_versions(zi_instruments=None):\n if zi_instruments is None:\n zi_instruments = get_all_connected_zi_instruments()\n\n versions, exceptions = {}, {}\n for node in ['system/fwrevision', 'system/fpgarevision']:\n versions[node] = {}\n for dev in zi_instruments:\n try:\n versions[node][f'{dev.name} - {dev.devname}'] = dev.geti(node)\n except Exception:\n try:\n # for QCodes-based devices\n versions[node][f'{dev.name} - {dev.devname}'] = \\\n dev.daq.getInt(f'{dev.devname}/system/fwrevision')\n except Exception as e:\n exceptions[f'{node} for {dev.devname}'] = e\n return versions, exceptions", "def describe_operating_systems():\n pass", "def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()", "def lsbINFO():\n return dict([(l.split(\"=\")[0], l.split(\"=\")[1].strip(\"'\\\"\")) \\\n for l in open(\"/etc/lsb-release\", \"r\").read().strip().split(\"\\n\") if \"=\" in l])", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def device_get_format_version(udev_info):\n return udev_info.get(\"ID_FS_VERSION\")", "def firmware(self):\n oids = ['1.2.840.10036.3.1.2.1.4.5', '1.2.840.10036.3.1.2.1.4.8']\n for oid in oids:\n tmp = self.get_value(oid).split('.')\n if tmp is not None:\n length = len(tmp)\n i = 0\n for piece in tmp:\n if 'v' in piece:\n return 'AirOS ' + '.'.join(tmp[i:length])\n i = i + 1", "def _get_ilo_firmware_version(self):\n\n manager, reset_uri = self._get_ilo_details()\n ilo_firmware_version = manager['Firmware']['Current']['VersionString']\n return {'ilo_firmware_version': ilo_firmware_version}", "def test_v1alpha3_guestosinfo(self):\n pass", "def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)", "def get_version_info():\n return platform.version()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the OS version are you running, what is the highest available build number? Are you running it?
def check_highest_build(self, sys_info, api_results): if not api_results.get("latest_build_number"): self.results[self.current_endpoint]["latest_build_number"] = self.__make_api_get( '/apple/latest_build_number/%s' % (".".join(sys_info.get("os_ver").split(".")[:2]))) self.message("\n\tHighest build number check:") # Validate response from API if self._validate_response(api_results["latest_build_number"]): # Valid response from API - now interpret it if api_results["latest_build_number"][ "msg"] == sys_info.get("build_num"): self.message( "\t\t[+] SUCCESS - You are running the latest build number (%s) of the OS version you have installed (%s)" % (sys_info.get("build_num"), sys_info.get("os_ver"))) elif sys_info.get("build_num")[-1].isalpha(): self.message( "\t\t[!] ATTENTION - It looks like you might be running a development OS build '%s' (%s). The EFIgy API currently only has reliable data for production OS releases." % (sys_info.get("build_num"), sys_info.get("os_ver"))) else: self.message( "\t\t[-] ATTENTION - You are NOT running the latest release build number of your OS version (%s). Your build number is %s, the latest release build number is %s" % (sys_info.get("os_ver"), sys_info.get("build_num"), api_results["latest_build_number"]["msg"]))
[ "def version_max():\n return VERSION_MAX", "def get_os_build_version():\n out = subprocess.check_output(['sysctl', '-n', 'kern.osversion'],\n universal_newlines=True).splitlines()\n assert len(out) == 1, out\n return out[0]", "def get_os_release():\n return re.match('[\\d.]+', platform.release()).group(0)", "def get_build_version():\n prefix = \"MSC v.\"\n i = sys.version.find(prefix)\n if i == -1:\n return 6\n i = i + len(prefix)\n s, rest = sys.version[i:].split(\" \", 1)\n majorVersion = int(s[:-2]) - 6\n if majorVersion >= 13:\n # v13 was skipped and should be v14\n majorVersion += 1\n minorVersion = int(s[2:3]) / 10.0\n # I don't think paths are affected by minor version in version 6\n if majorVersion == 6:\n minorVersion = 0\n if majorVersion >= 6:\n return majorVersion + minorVersion\n # else we don't know what version of the compiler this is\n return None", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def _get_os_version(self):\n\n output = self.command(\"sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version\")\n return output[\"stdout_lines\"][0].strip()", "def get_dev_build_number(self):\n return self._get_thermostat_advanced_info_label(\n \"Firmware Build Number\"\n ) or self._get_thermostat_advanced_info_label(\"Version\")", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def get_os_version_number():\n return subprocess.check_output(['sw_vers', '-productVersion'],\n universal_newlines=True).strip()", "def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'", "def PlatformVersion():\n return ''", "def get_version_info():\n return platform.version()", "def max_version(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"max_version\")", "def OSVersion(self):\n return self._core.get_server_attribute('platformVersion', 'Unknown')", "def os_info():\n\n return(sys.version, sys.platform)", "def _get_version(self):\n version = self.job_config.get(\"os_version\")\n if not version:\n version = DEFAULT_OS_VERSION.get(self.os_type)\n\n return str(version)", "def get_chromeos_version():\r\n try:\r\n get_board_property('CHROMEOS_RELEASE_VERSION')\r\n except:\r\n logging.info(\"CHROMEOS_RELEASE_VERSION not found\")\r\n return -1", "def get_local_machine_release():\n # type: () -> int\n dver = get_local_machine_dver()\n try:\n return int(re.search(r\"\\d+\", dver).group(0))\n except AttributeError: # no match\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Preprocess graphs by casting into FloatTensor and setting to cuda if available
def preprocess(dataset, cuda): for g, _ in dataset: for key_g, val_g in g.ndata.items(): processed = g.ndata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.ndata[key_g] = processed for key_g, val_g in g.edata.items(): processed = g.edata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.edata[key_g] = processed
[ "def ggml_cuda_compute_forward(params: ffi.CData, tensor: ffi.CData) -> bool:\n ...", "def ggml_cuda_transform_tensor(data: ffi.CData, tensor: ffi.CData) -> None:\n ...", "def _to_cpu(self):\n self._get_device('cpu')\n self.to(self.device)\n torch.cuda.empty_cache()", "def ggml_mpi_graph_compute_pre(ctx_mpi: ffi.CData, gf: ffi.CData, n_layers: int) -> None:\n ...", "def _model_to_device(self):\n if next(self.model.parameters()).is_cuda is False:\n self.model.to(self.device)", "def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T", "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring '\n 'multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,\n on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_after_tt.pbtxt')\n return tensor_fetches", "def ggml_cuda_assign_buffers(tensor: ffi.CData) -> None:\n ...", "def split_fused_prelu(input_graph_def: util.GraphDef) -> util.GraphDef:\n def _predicate(node):\n return (util.is_fused_conv2d(node, b'Prelu')\n or util.is_fused_matmul(node, b'Prelu'))\n return util.replace_matching_nodes(input_graph_def, _predicate,\n _split_fused_op)", "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n if isinstance(graph, func_graph.FuncGraph) or isinstance(\n graph, function._FuncGraph): # pylint: disable=protected-access\n logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. '\n 'Ignoring tracing.')\n return tensor_fetches\n\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring '\n 'multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n # Reset the parameters in case parameters are changed.\n self._parameters = tensor_tracer_flags.TTParameters()\n\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,\n on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_after_tt.pbtxt')\n return tensor_fetches", "def set_default_tensor_type(device):\r\n if device in [torch.device(\"cpu\"), \"cpu\"]:\r\n torch.set_default_tensor_type(torch.FloatTensor)\r\n else:\r\n torch.set_default_tensor_type(torch.cuda.FloatTensor)", "def ggml_cuda_assign_buffers_force_inplace(tensor: ffi.CData) -> None:\n ...", "def _addCastOps(self, user_graph_def):\n # Load user-specified graph into memory\n user_graph = tf.Graph()\n with user_graph.as_default():\n tf.import_graph_def(user_graph_def, name=\"\")\n\n # Build a subgraph containing our injected ops\n # TODO: Cheap optimization: if all input tensors are of type float64, just do nothing here\n injected_op_subgraph = tf.Graph()\n # Maps names of input tensors in our original graph to outputs of the injected-op subgraph\n input_map = {}\n with injected_op_subgraph.as_default():\n with tf.name_scope(self.SPARKDL_OP_SCOPE):\n for _, orig_tensor_name in self.getInputMapping():\n orig_tensor = tfx.get_tensor(orig_tensor_name, user_graph)\n # Create placeholder with same shape as original input tensor, but that accepts\n # float64 input from Spark.\n spark_placeholder = tf.placeholder(tf.float64, shape=orig_tensor.shape,\n name=tfx.op_name(orig_tensor_name))\n # If the original tensor was of type float64, just pass through the Spark input\n if orig_tensor.dtype == tf.float64:\n input_map[orig_tensor_name] = spark_placeholder\n # Otherwise, cast the Spark input to the datatype of the original tensor\n else:\n input_map[orig_tensor_name] = tf.cast(spark_placeholder,\n dtype=orig_tensor.dtype)\n tf.import_graph_def(graph_def=user_graph_def, input_map=input_map, name=\"\")\n return injected_op_subgraph.as_graph_def(add_shapes=True)", "def preprocess(adj, features, labels, preprocess_adj=False, preprocess_feature=False, sparse=False, device='cpu'):\n\n if preprocess_adj:\n adj_norm = normalize_adj(adj)\n\n if preprocess_feature:\n features = normalize_feature(features)\n\n labels = torch.LongTensor(labels)\n if sparse:\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n features = sparse_mx_to_torch_sparse_tensor(features)\n else:\n features = torch.FloatTensor(np.array(features.todense()))\n adj = torch.FloatTensor(adj.todense())\n return adj.to(device), features.to(device), labels.to(device)", "def local_gpu_lazy_ifelse(node):\r\n if isinstance(node.op, theano.ifelse.IfElse) and not node.op.gpu:\r\n gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True)\r\n outs_clients = reduce(list.__add__,\r\n [out.clients for out in node.outputs])\r\n if any([(i.owner and isinstance(i.owner.op, HostFromGpu))\r\n for i in node.inputs]) or any(\r\n [c != 'output' and c.op == gpu_from_host for c, idx\r\n in outs_clients]):\r\n\r\n c = node.inputs[0]\r\n outs = node.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n return [host_from_gpu(out) for out in\r\n gpu_ifelse.make_node(c, *outs).outputs]\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if (host_input.owner and\r\n isinstance(host_input.owner.op, theano.ifelse.IfElse) and\r\n not host_input.owner.op.gpu and\r\n # If there is more then 1 outputs, we can't replace it\r\n # here with a local optimizer as we replace the\r\n # GpuFromHost node and the other output of the if won't be\r\n # replaced.\r\n host_input.owner.op.n_outs == 1):\r\n gpu_ifelse = theano.ifelse.IfElse(host_input.owner.op.n_outs,\r\n gpu=True)\r\n\r\n c = host_input.owner.inputs[0]\r\n outs = host_input.owner.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n\r\n outs = gpu_ifelse.make_node(c, *outs).outputs\r\n return outs\r\n\r\n return False", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return CudaNdarrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n #gpu_from_host(conv) -> gpu_conv(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):\r\n gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)\r\n if gpu_conv is None:\r\n return\r\n img, kern = host_input.owner.inputs\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(out,\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]\r\n\r\n if isinstance(node.op, conv.ConvOp):\r\n #conv(host_from_gpu) -> host_from_gpu(gpu_conv)\r\n img, kern = node.inputs\r\n img_on_gpu = (img.owner and isinstance(img.owner.op, HostFromGpu))\r\n kern_on_gpu = (kern.owner and isinstance(kern.owner.op, HostFromGpu))\r\n if img_on_gpu or kern_on_gpu:\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return GpuArrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n img, kern = node.inputs\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n #op_lifter want the output on the GPU.\r\n out = gpu_from_host(out)\r\n out.values_eq_approx = values_eq_approx\r\n return [out]", "def convert_operations(onnx_graph, opset_version, batch_dim=0, enable_pruning=True):\n weights = {tensor.name: tensor for tensor in onnx_graph.initializer}\n\n for i, node in enumerate(onnx_graph.node):\n # extract only useful inputs\n params = [weights[par_name] for par_name in node.input if par_name in weights]\n\n if node.op_type == \"Add\":\n op = Add(feature_dim=batch_dim + 1) # 0 for CV models and 1 for NLP\n elif node.op_type == \"And\":\n op = OperatorWrapper(torch.logical_and)\n elif node.op_type == \"AveragePool\":\n op = convert_layer(node, \"AvgPool\")\n elif node.op_type == \"BatchNormalization\":\n op = convert_batch_norm_layer(node, params=params)\n elif node.op_type == \"Cast\":\n op = Cast(**extract_attributes(node))\n elif node.op_type == \"Ceil\":\n op = OperatorWrapper(torch.ceil)\n elif node.op_type == \"Clip\":\n op = OperatorWrapper(torch.clamp)\n elif node.op_type == \"Concat\":\n op = partial(torch.cat, **extract_attributes(node))\n elif node.op_type == \"Constant\":\n op = Constant(**extract_attributes(node))\n elif node.op_type == \"ConstantOfShape\":\n op = ConstantOfShape(**extract_attributes(node))\n elif node.op_type == \"Conv\":\n op = convert_layer(node, \"Conv\", params)\n elif node.op_type == \"ConvTranspose\":\n op = convert_layer(node, \"ConvTranspose\", params)\n elif node.op_type == \"Div\":\n op = Div()\n elif node.op_type == \"Elu\":\n op = nn.ELU(**extract_attributes(node), inplace=True)\n elif node.op_type == \"Equal\":\n op = OperatorWrapper(torch.eq)\n elif node.op_type == \"Erf\":\n op = OperatorWrapper(torch.erf)\n elif node.op_type == \"Exp\":\n op = OperatorWrapper(torch.exp)\n elif node.op_type == \"Expand\":\n op = Expand()\n elif node.op_type == \"Flatten\":\n op = Flatten(**extract_attributes(node))\n op.feature_dim = batch_dim + 1 # Necessary for transformers\n elif node.op_type == \"Floor\":\n op = OperatorWrapper(torch.floor)\n elif node.op_type == \"Gather\":\n op = Gather(**extract_attributes(node))\n elif node.op_type == \"GatherND\":\n op = GatherND(**extract_attributes(node))\n elif node.op_type == \"Gemm\":\n op = convert_linear_layer(node, params)\n elif node.op_type == \"GlobalAveragePool\":\n op = GlobalAveragePool()\n elif node.op_type == \"Greater\":\n op = OperatorWrapper(torch.greater)\n elif node.op_type == \"Identity\":\n op = nn.Identity()\n elif node.op_type == \"InstanceNormalization\":\n op = convert_instance_norm_layer(node, params=params)\n elif node.op_type == \"LeakyRelu\":\n op = nn.LeakyReLU(**extract_attributes(node), inplace=True)\n elif node.op_type == \"Less\":\n op = OperatorWrapper(torch.less)\n elif node.op_type == \"Log\":\n op = OperatorWrapper(torch.log)\n elif node.op_type == \"Loop\":\n op = Loop(\n opset_version=opset_version,\n batch_dim=batch_dim,\n **extract_attributes(node),\n )\n elif node.op_type == \"LSTM\":\n op = convert_lstm_layer(node, weights)\n elif node.op_type == \"MatMul\":\n if params:\n weight = _deserialize_to_torch(params[0])\n op = nn.Linear(weight.shape[0], weight.shape[1], bias=False)\n op.weight.data = weight.t()\n\n # check if next node Add to add bias\n next_node = onnx_graph.node[i + 1]\n next_params = [\n weights[par_name]\n for par_name in next_node.input\n if par_name in weights\n ]\n if next_params and next_node.op_type == \"Add\":\n bias = _deserialize_to_torch(next_params[0])\n op.bias = nn.Parameter(bias)\n node.output.pop()\n node.output.extend(next_node.output)\n onnx_graph.node.pop(i + 1) # remove next node\n else:\n op = MatMul()\n elif node.op_type == \"Max\":\n op = OperatorWrapper(torch.max)\n elif node.op_type == \"MaxPool\":\n op = convert_layer(node, \"MaxPool\")\n elif node.op_type == \"Min\":\n op = OperatorWrapper(torch.min)\n elif node.op_type == \"Mul\":\n op = OperatorWrapper(torch.mul)\n elif node.op_type == \"NonMaxSuppression\":\n op = NonMaxSuppression(**extract_attributes(node))\n elif node.op_type == \"Not\":\n op = OperatorWrapper(torch.logical_not)\n elif node.op_type == \"OneHot\":\n op = OneHot(**extract_attributes(node))\n elif node.op_type == \"Or\":\n op = OperatorWrapper(torch.logical_or)\n elif node.op_type == \"Pad\":\n op = Pad(**extract_attributes(node))\n elif node.op_type == \"Pow\":\n op = OperatorWrapper(torch.pow)\n elif node.op_type == \"PRelu\":\n op = PRelu()\n elif node.op_type == \"Range\":\n op = Range()\n elif node.op_type == \"Reciprocal\":\n op = OperatorWrapper(torch.reciprocal)\n elif node.op_type == \"ReduceMax\":\n kwargs = dict(keepdim=True)\n kwargs.update(extract_attributes(node))\n op = partial(torch.max, **kwargs)\n elif node.op_type == \"ReduceMean\":\n kwargs = dict(keepdim=True)\n kwargs.update(extract_attributes(node))\n op = partial(torch.mean, **kwargs)\n elif node.op_type == \"ReduceMin\":\n kwargs = dict(keepdim=True)\n kwargs.update(extract_attributes(node))\n op = partial(torch.min, **kwargs)\n elif node.op_type == \"ReduceProd\":\n kwargs = dict(keepdim=True)\n kwargs.update(extract_attributes(node))\n op = partial(torch.prod, **kwargs)\n elif node.op_type == \"ReduceSum\":\n op = ReduceSum(opset_version=opset_version, **extract_attributes(node))\n elif node.op_type == \"Relu\":\n op = nn.ReLU(inplace=True)\n elif node.op_type == \"Reshape\":\n shape = list(\n filter(lambda x: x.name == node.input[1], onnx_graph.initializer)\n )\n shape = np.copy(numpy_helper.to_array(shape[0])) if shape else None\n op = Reshape(enable_pruning, shape)\n elif node.op_type == \"Resize\":\n op = Resize(**extract_attributes(node))\n elif node.op_type == \"Scatter\":\n op = Scatter(**extract_attributes(node))\n elif node.op_type == \"ScatterElements\":\n op = ScatterElements(**extract_attributes(node))\n elif node.op_type == \"ScatterND\":\n op = ScatterND()\n elif node.op_type == \"Shape\":\n op = Shape()\n elif node.op_type == \"Sigmoid\":\n op = nn.Sigmoid()\n elif node.op_type == \"Slice\":\n op = Slice(**extract_attributes(node))\n elif node.op_type == \"Softmax\":\n kwargs = dict(dim=-1)\n kwargs.update(extract_attributes(node))\n op = nn.Softmax(**kwargs)\n elif node.op_type == \"Softplus\":\n op = nn.Softplus(beta=1)\n elif node.op_type == \"Softsign\":\n op = nn.Softsign()\n elif node.op_type == \"Split\":\n kwargs = extract_attributes(node)\n # if the split_size_or_sections is not in node attributes,\n # the number_of_splits becomes the number of node outputs\n if \"split_size_or_sections\" not in kwargs:\n kwargs[\"number_of_splits\"] = len(node.output)\n op = Split(enable_pruning, **kwargs)\n elif node.op_type == \"Sqrt\":\n op = OperatorWrapper(torch.sqrt)\n elif node.op_type == \"Squeeze\":\n op = Squeeze(opset_version=opset_version, **extract_attributes(node))\n elif node.op_type == \"Sub\":\n op = OperatorWrapper(torch.sub)\n elif node.op_type == \"Tanh\":\n op = OperatorWrapper(torch.tanh)\n elif node.op_type == \"ThresholdedRelu\":\n op = ThresholdedRelu(**extract_attributes(node))\n elif node.op_type == \"Tile\":\n op = Tile()\n elif node.op_type == \"TopK\":\n op = TopK()\n elif node.op_type == \"Transpose\":\n op = Transpose(**extract_attributes(node))\n elif node.op_type == \"Unsqueeze\":\n op = Unsqueeze(opset_version=opset_version, **extract_attributes(node))\n elif node.op_type == \"Upsample\":\n op = Upsample(**extract_attributes(node))\n elif node.op_type == \"Where\":\n op = Where()\n else:\n op = getattr(torch, node.op_type.lower(), None)\n if op is None:\n raise NotImplementedError(\n \"Conversion not implemented for op_type={}.\".format(node.op_type)\n )\n else:\n print(\n \"Automatic inference of operator: {}\".format(node.op_type.lower())\n )\n\n op_name = \"{}_{}\".format(node.op_type, node.output[0])\n op_id = node.output[0]\n yield op_id, op_name, op", "def ggml_cuda_assign_buffers_no_scratch(tensor: ffi.CData) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the languages stored in the dictionaries
def plot_languages(dict_usage_complexities, dict_cognitive_complexity): attested_languages = ( frozenset(['nor', 'and', 'or', 'not']), frozenset(['and', 'or', 'not']), frozenset(['and', 'not']), frozenset(['or', 'not']), ) fig, ax = plt.subplots(figsize=(8.27,4)) for name in dict_usage_complexities.keys(): # if not any([i in ['nc', 'nic', 'bc', 'XOR', 'c', 'ic'] for i in name]) and 'not' in name: if 'not' in name: # if True: usage_complexity = dict_usage_complexities[name] cognitive_complexity = dict_cognitive_complexity[name] if name in attested_languages: color = 'red' zorder = 10 if name == frozenset(['or', 'not']): yshift = 0.4 else: yshift = 0 ax.text( usage_complexity + 0.02, cognitive_complexity + 0.3 + yshift, s=','.join(name), fontsize='x-small' ) else: color='black' zorder = 1 # ax.scatter( # usage_complexity, cognitive_complexity, # color=color, # zorder=zorder # ) # ax.text( # usage_complexity, cognitive_complexity, # s=','.join(name), # fontsize='xx-small', # rotation=90, # color=color # ) ax.scatter(usage_complexity,cognitive_complexity,color=color) ax.set_xlabel('Usage complexity') ax.set_ylabel('Conceptual complexity') # ax.set_xlim(0,3) ax.set_xlim(1.05,2.8) # plt.show() plt.savefig('figure.png', dpi=300, transparent=True)
[ "def cistime_lang():\n timing.plot_scalings(compare='language')", "def visualize_vecDict(vecDict):\n for url in vecDict:\n plt.plot(vecDict[url])\n plt.legend([key for key in vecDict])\n plt.title(f'Vectors for {len(vecDict)} Documents')\n plt.xlabel('Vector Dimensions')\n plt.ylabel('Document Value')\n plt.show()", "def yggtime_lang():\n timing.plot_scalings(compare='language')", "def show_line(dict, xlabel=\"x\", ylabel=\"y\", title=\"title\"):\n plt.clf()\n plt.cla()\n plt.plot(list(dict.keys()), list(dict.values()), alpha=0.4, color = 'g')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()", "def dispersion_plot(self, words):\n from nltk.draw import dispersion_plot\n\n dispersion_plot(self, words)", "def plot_embeddings(M_reduced, word2Ind, words):\n\n # YOUR CODE HERE\n \n for i,type in enumerate(words):\n x_coor,y_coor = M_reduced[word2Ind[type]][0],M_reduced[word2Ind[type]][1]\n \n plt.scatter(x_coor, y_coor, marker='*', color='red')\n plt.text(x_coor+0.05, y_coor+0.05, type, fontsize=12)\n \n plt.show()\n \n #raise NotImplementedError()", "def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()", "def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n handles, labels = ax.get_legend_handles_labels()\n\n labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # ax.legend(handles=handles[1:], labels=labels[1:])\n ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_lang_types_\" + file_name + \".png\")\n plt.show()", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def print_languages(config_filepath, label_filepath):\n load_classifier(config_filepath)\n label_filepath = os.path.abspath(label_filepath)\n wili_labels = wili.get_language_data(label_filepath)\n iso2name = dict([(el['ISO 369-3'], el['English'])\n for el in wili_labels])\n print(', '.join(sorted([iso2name[iso]\n for iso in classifier.get_mapping_languages()\n if iso != 'UNK'])))", "def plot_Dictionary():\n #As fields are added to fv3 output just put those in the following dictionary\n # according to the syntax used. Then all you have to do is create a function\n # that defines the clevs, cm, and var_n if it requires unit conversion \n # (e.g., plot_PRATEsfc(var_n) )\n dispatcher={ \n 'u_inc':plot_u_inc,\n 'v_inc':plot_v_inc,\n }\n return dispatcher", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def plot_yr_type(names_data,*args):\n name_fig='Años sin datos'\n path_fig=''\n \n if len(args) != len(names_data):\n raise IndexError, \"largo names_data no coincide con args\"\n i = 1\n for arg in args:\n yrs_lost = index_lost(arg, hidecx=True) # Utiliza Fn index_lost\n plt.plot(yrs_lost, ones(yrs_lost,i),'o') # Utiliza Fn ones\n i += 1\n # Poner título en unicode\n title_fig = name_fig.decode('utf8')\n ymin, ymax = plt.ylim()\n plt.ylim( ymin-1, ymax+1 )\n xmin, xmax = plt.xlim()\n plt.xlim( xmin-1, xmax+1 )\n plt.yticks( range(1,len(args)+1), trunc_str(names_data) ) # Utiliza Fn trunc_str\n plt.ylabel(u'Estaci\\xf3n')\n plt.xlabel(u'A\\xf1o')\n plt.title('%s'%title_fig)\n plt.savefig('%s%s'%(path_fig, name_fig))\n plt.close()", "def plot_various():\n load_variables(SYS_VARS.KDDCup_path_names)\n saved_preprocess = \"KDD_train_num_10.npy\"\n data = categorical_labels_conversion (SYS_VARS.KDDCup_path_train_10 , attacks_map, _ATTACK_INDEX_KDD, saved_preprocess, SYS_VARS.KDDCup_path_result, False)\n plot_attacks( attacks_data = data, a_index = _ATTACK_INDEX_KDD)\n #plot_attacks( dataset = SYS_VARS.KDDCup_path_train_10, a_index = _ATTACK_INDEX_KDD)", "def plot_countryperskill(data_df, **args):\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()", "def visualize(country_names):\n countries_array = modified_data()\n current_countries = []\n if type(country_names) != list:\n country_names = [country_names]\n for country in countries_array:\n for country_name in country_names:\n if country_name == country.country_name:\n current_countries.append(country)\n break\n bar_out_of_school(current_countries)\n plot_by_time(current_countries, \"fertility\")\n plot_by_time(current_countries, \"mortality\")\n plot_by_time(current_countries, \"poverty\")\n bar_gdp(current_countries)\n bar_child_marriages(current_countries)", "def plot_embedding(X, y, d, path, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n # Plot colors numbers\n plt.figure(figsize=(10,10))\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n # plot colored number\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.bwr(d[i] / 1.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\n plt.savefig(path +'/' + title + '.pdf')", "def visualize_stats(diction, plot_image_name, wordcloud_image_name, plot_title, path):\n\n # sort dictionary by values\n sorted_dict = OrderedDict(sorted(diction.items(), key=lambda t: t[1]))\n # get 20 first key-value pairs of sorted dict\n topdict = dict(list(sorted_dict.items())[-20:])\n\n # make horizontal-bar plots\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.yaxis.grid(False)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.barh(range(len(topdict)), list(\n topdict.values()), align='center')\n plt.yticks(range(len(topdict)), list(topdict.keys()))\n plt.xlabel('Frequency')\n plt.title(plot_title)\n # save figure to an image\n plt.savefig(path + plot_image_name, bbox_inches=\"tight\")\n plt.close()\n\n # make word clouds (maximum 100 words)\n wc = WordCloud(width=900, height=600, max_words=100, relative_scaling=1,\n normalize_plurals=False, background_color='white').generate_from_frequencies(diction)\n plt.imshow(wc)\n plt.axis(\"off\")\n # save cloud to an image\n wc.to_file(path + wordcloud_image_name)\n plt.close()", "def plot_distro_for_value_counts_all(df):\n \n c=\"#f7965cff\"\n value_counts_all = pd.DataFrame(df.language.value_counts(ascending=False))\n plt.figure(figsize=(13,10))\n bar = sns.barplot(x=value_counts_all.index, y=\"language\", data=value_counts_all, color = c)\n bar.set_xticklabels(bar.get_xticklabels(),rotation=65)\n bar.set_ylabel(\"counts\")\n\n plt.title(\"How is the data distributed per document for all languages?\")\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge draft invoices. Work only with same partner. You can merge invoices and refund invoices with echa other. Moves all lines on the first invoice.
def merge_invoice(self, cr, uid, invoices, context=None): order_ids = [] pick_ids = [] if len(invoices) <= 1: return False parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id']) for inv in invoices: if parent.partner_id != inv.partner_id: raise osv.except_osv(_("Partners don't match!"), _("Can not merge invoice(s) on different partners or states !.")) if inv.state != 'draft': raise osv.except_osv(_("Invalid action !"), _("You can merge only invoices in draft state.")) # Merge invoices that are in draft state inv_line_obj = self.pool.get('account.invoice.line') name = parent.name comment = parent.comment origin = parent.origin for inv in invoices: if inv.id == parent.id: continue # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head. if inv.name: # Find if the same name already exist, if yes, skip to add. name_list = name.replace(' ', '').split(',') if inv.name not in name_list: name += ', %s' % inv.name if inv.comment: comment = comment and comment + ', %s' % inv.comment or inv.comment if inv.origin: origin += ', %s' % inv.origin line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)]) for inv_lin in inv_line_obj.browse(cr, uid, line_ids): mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id), ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same. ]) if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)}) inv_line_obj.unlink(cr, uid, inv_lin.id) elif inv.type == parent.type: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id}) else: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity}) if inv.sale_order_ids: order_ids += [order.id for order in inv.sale_order_ids] if inv.picking_ids: pick_ids += [picking.id for picking in inv.picking_ids] self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment}) #Remove By DRB #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) self.unlink(cr, uid, [inv.id]) #Distinct List order_ids = list(set(order_ids)) pick_ids = list(set(pick_ids)) self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]}) self.button_reset_taxes(cr, uid, [parent.id]) return parent.id
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def do_merge(self, cr, uid, ids, context=None): \n invent_obj = self.pool.get('stock.inventory')\n invent_line_obj = self.pool.get('stock.inventory.line')\n invent_lines = {}\n if context is None:\n context = {}\n for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):\n if inventory.state == \"done\":\n raise osv.except_osv(_('Warning!'),\n _('Merging is only allowed on draft inventories.'))\n\n for line in inventory.inventory_line_id:\n key = (line.location_id.id, line.product_id.id, line.product_uom.id)\n if key in invent_lines:\n invent_lines[key] += line.product_qty\n else:\n invent_lines[key] = line.product_qty\n\n\n new_invent = invent_obj.create(cr, uid, {\n 'name': 'Merged inventory'\n }, context=context)\n\n for key, quantity in invent_lines.items():\n invent_line_obj.create(cr, uid, {\n 'inventory_id': new_invent,\n 'location_id': key[0],\n 'product_id': key[1],\n 'product_uom': key[2],\n 'product_qty': quantity,\n })\n\n return {'type': 'ir.actions.act_window_close'}", "def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):\n move_lines = super(account_invoice, self).finalize_invoice_move_lines(\n cr, uid, invoice_browse, move_lines)\n if invoice_browse.type == u'in_invoice':\n if move_lines and \\\n invoice_browse.currency_id.id != \\\n invoice_browse.company_id.currency_id.id:\n obj_cur = self.pool.get('res.currency')\n cur_brw = obj_cur.browse(\n cr, uid, invoice_browse.currency_id.id, context={})\n if not cur_brw.account_id:\n raise osv.except_osv(\n _('Error!'),\n _('Must specify an account for currency rounding ' +\n 'diff (%s)') % cur_brw.name)\n amount_diff = 0\n for x, y, ln in move_lines:\n ln.update({'debit': round(ln['debit'], 2),\n 'credit': round(ln['credit'], 2)})\n amount_diff += ln['debit'] - ln['credit']\n if amount_diff != 0:\n line = {\n 'account_id': cur_brw.account_id.id,\n 'date': invoice_browse.date_invoice,\n 'partner_id': invoice_browse.partner_id.id,\n 'name': _('Currency rounding diff'),\n 'debit': abs(amount_diff) if amount_diff < 0\n else 0,\n 'credit': abs(amount_diff) if amount_diff > 0\n else 0,\n }\n move_lines.append((0, 0, line))\n return move_lines", "def action_create_invoice(self):\r\n inv_obj = self.env['account.invoice']\r\n inv_line_obj = self.env['account.invoice.line']\r\n # account_id = self.income_acc_id\r\n inv_val = {\r\n 'type': 'out_invoice',\r\n # 'transaction_ids': self.ids,\r\n 'state': 'draft',\r\n 'partner_id': self.customer_name.id or False,\r\n 'date_invoice': fields.Date.context_today(self),\r\n 'origin': self.booking_no,\r\n 'freight_booking': self.id,\r\n 'account_id': self.customer_name.property_account_receivable_id.id or False,\r\n 'company_id': self.company_id.id,\r\n 'user_id': self.sales_person.id,\r\n }\r\n\r\n invoice = inv_obj.create(inv_val)\r\n for line in self.cost_profit_ids:\r\n sale_unit_price_converted = line.list_price * line.profit_currency_rate\r\n account_id = False\r\n if line.product_id.property_account_income_id:\r\n account_id = line.product_id.property_account_income_id\r\n elif line.product_id.categ_id.property_account_income_categ_id:\r\n account_id = line.product_id.categ_id.property_account_income_categ_id\r\n if sale_unit_price_converted > 0:\r\n inv_line = inv_line_obj.create({\r\n 'invoice_id': invoice.id or False,\r\n 'account_id': account_id.id or False,\r\n 'name': line.product_id.name or '',\r\n 'product_id': line.product_id.id or False,\r\n 'quantity': line.profit_qty or 0.0,\r\n 'uom_id': line.uom_id.id or False,\r\n 'price_unit': sale_unit_price_converted or 0.0\r\n })\r\n line.write({'invoice_id': invoice.id or False,\r\n 'inv_line_id': inv_line.id or False})\r\n\r\n self.shipment_booking_status = '10'", "def action_move_create(self):\n res = super(HrExpenseExpense, self).action_move_create()\n for expense in self:\n if expense.invoice:\n partner = expense.invoice.partner_id.commercial_partner_id\n move_lines = expense.account_move_id.line_ids\n c_move_lines = move_lines.filtered(\n lambda x: x.partner_id == partner and\n x.debit == abs(round(expense.invoice.residual, 2)))\n c_move_lines |= expense.invoice.move_id.line_ids.filtered(\n lambda x: x.account_id == expense.invoice.account_id and\n x.credit == abs(round(expense.invoice.residual, 2)))\n if len(c_move_lines) != 2:\n raise exceptions.Warning(\n _('Cannot reconcile supplier invoice payable with '\n 'generated line. Please check amounts and see '\n 'if the invoice is already added or paid. '\n 'Invoice: %s') % expense.invoice.number)\n c_move_lines.reconcile()\n return res", "def create_invoices(self, cr, uid, ids, context=None):\n group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hmtk_ndk_sale_customization', 'group_full_invoicing')[1]\n group = self.pool.get('res.groups').browse(cr, uid, group_id, context=context)\n picking_obj = self.pool.get('stock.picking')\n full_invoice_uids = [x.id for x in group.users]\n sale_obj = self.pool.get('sale.order')\n act_window = self.pool.get('ir.actions.act_window')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n picking_id = picking_obj.search(cr, uid, [('sale_id', '=', sale_ids[0])], context=context)\n if wizard.advance_payment_method == 'all':\n if picking_id:\n picking = picking_obj.browse(cr, uid, picking_id, context=context)[0]\n if uid in full_invoice_uids or picking.state == 'done':\n # create the final invoices of the active sales orders\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You do not have permissions for full invoicing. You can create partial invoice. Select option other than Invoice the whole sales order.'))\n elif uid in full_invoice_uids:\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('Allow full invoicing for this user under user groups to perform this action.')) \n else:\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)", "def update_sales_team_in_account_invoice(self):\n invoice_obj = self.env['account.invoice']\n sales_invoices = invoice_obj.search([\n ('partner_id', '!=', False),\n ('team_id', '=', False),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n for sale_inv in sales_invoices:\n if sale_inv.partner_id:\n if sale_inv.partner_id.team_id:\n sale_inv.write({\n 'team_id': sale_inv.partner_id.team_id.id or False\n })\n elif sale_inv.partner_id.parent_id and \\\n sale_inv.partner_id.parent_id.team_id:\n sale_inv.write({\n 'team_id':\n sale_inv.partner_id.parent_id.team_id.id or False\n })", "def refund(self):\n clone_list = []\n line_obj = self.env['pos.order.line']\n pos_payment_obj = self.env['pos.make.payment']\n account_payment_obj = self.env['account.bank.statement']\n account_payment_line_obj = self.env['account.bank.statement.line']\n stock_return_picking_obj = self.env['stock.return.picking']\n main_order_state = False\n\n for order in self:\n # order.customer_refund_create()\n\n if order.state == 'done':\n main_order_state = order.state\n main_order_move = order.account_move.id\n\n if order.tax_invoice:\n raise UserError(_('!!! The order have Tax Invoice No already. !!!'))\n if order.is_return_order:\n raise UserError(_('The order is already refunded'))\n order.is_return_order = True\n order.refund_user_id = self.env.user.id\n\n if order.session_id.state == 'closed':\n raise UserError(_('!!! This session %s is closed !!!' % order.session_id.name))\n\n clone_id = order.copy({\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'picking_id': order.picking_id.id,\n })\n\n for order_line in clone_id.lines:\n order_line.write({\n 'qty': -order_line.qty\n })\n\n clone_id._amount_all()\n\n ctx = self._context.copy()\n ctx.update({\n 'active_id': clone_id.id\n })\n\n order_obj = self.env['pos.order']\n active_id = ctx and ctx.get('active_id', False)\n if active_id:\n order_id = order_obj.browse(active_id)\n\n account_statement = None\n for statement_id in order.statement_ids:\n account_statement = statement_id.statement_id.id\n statement_id.copy({\n 'amount': statement_id.amount * -1,\n 'pos_statement_id': order_id.id,\n 'statement_id': account_statement,\n 'journal_id': statement_id.journal_id.id\n })\n\n if order_id.test_paid():\n order_id.write({'state': 'paid'})\n\n order_id.create_picking()\n\n clone = clone_id\n\n for statement_id in clone.statement_ids:\n amount_old = statement_id.amount\n amount = amount_old * -1\n statement_old_ids = account_payment_line_obj.search(([\n ('pos_statement_id', '=', order.id),\n ('amount', '=', amount)\n ]))\n if statement_old_ids:\n for statement_old_id in statement_old_ids:\n # account_payment_line = account_payment_line_obj.browse(statement_old_ids[0])\n statement_id.write({\n 'journal_id': statement_old_id.journal_id.id\n })\n\n clone.write({\n 'check_amount': True,\n })\n\n if main_order_state:\n ctx = self._context.copy()\n company_id = clone.session_id.config_id.company_id.id\n ctx.update(\n {'force_company': company_id, 'company_id': company_id}\n )\n self._create_account_move_line(\n [clone.id],\n clone.session_id,\n main_order_move,\n )\n abs = {\n 'name': _('Return Products'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.order',\n 'res_id': clone.id,\n 'view_id': False,\n 'context': self._context,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n }\n return abs", "def generate_invoice(self):\n if self.state == 'done' and self.sale_picking_line_ids:\n invoice = self.env['account.invoice'].create({\n 'partner_id': self.sale_order_id.partner_invoice_id.id,\n 'partner_shipping_id': self.sale_order_id.partner_shipping_id.id,\n 'sale_picking_id': self.id,\n })\n invoice._onchange_partner_id()\n invoice.write({\n 'payment_term_id': self.sale_order_id.payment_term_id.id if self.sale_order_id else False,\n 'origin': self.name,\n 'currency_id': self.sale_order_id.currency_id.id if self.sale_order_id else False,\n })\n for product_id, lines in groupby(self.sale_picking_line_ids, lambda l: l.product_id):\n lines = list(lines)\n # AMH#-- product uom (traitment) ...\n qty = sum([l.nbr_carton for l in lines])\n result = self.to_create_invoice_line(invoice, product_id)\n if len(result) == 1:\n result = result[0]\n invoice_line = self.env['account.invoice.line'].create({\n 'invoice_id': invoice.id,\n 'product_id': product_id.id,\n 'quantity': qty,\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'account_id': result['account_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n invoice_line._onchange_product_id()\n invoice_line.write({\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n self.account_invoice_id = invoice", "def postPendingInvoices(self, REQUEST=None):\n workflowTool = getToolByName(self, \"portal_workflow\")\n for invoice in self.getPendingInvoices():\n workflowTool.doActionFor(invoice, \"post\")", "def update_sales_person_in_account_invoice(self):\n \"\"\"Due to the studio fields that reference is missing need to fix.\"\"\"\n invoice_obj = self.env['account.invoice']\n cust_invs = invoice_obj.search([\n ('partner_id', '!=', False),\n ('user_id', '=', False),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n for cust_inv in cust_invs:\n if cust_inv.partner_id.user_id:\n cust_inv.write({\n 'user_id': cust_inv.partner_id.user_id.id or False\n })\n elif cust_inv.partner_id.parent_id and \\\n cust_inv.partner_id.parent_id.user_id:\n cust_inv.write({\n 'user_id':\n cust_inv.partner_id.parent_id.user_id.id or False\n })", "def post(self):\n for rec in self:\n\n if rec.state != 'draft':\n raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'open' for inv in rec.invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n\n # keep the name in case of a payment reset to draft\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].with_context(ir_sequence_date=rec.payment_date).next_by_code(sequence_code)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n # Create the journal entry\n amount = rec.amount * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n amount1 = rec.amount1 * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n amount2 = rec.amount2 * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n amount3 = rec.amount3 * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n amount4 = rec.amount4 * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n amount5 = rec.amount5 * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n #amount_total = amount + amount1 + amount2 + amount3\n move = rec.with_context(amount1=amount1,amount2=amount2,amount3=amount3,amount4=amount4,amount5=amount5)._create_payment_entry(amount)\n persist_move_name = move.name\n\n # In case of a transfer, the first journal entry created debited the source liquidity account and credited\n # the transfer account. Now we debit the transfer account and credit the destination liquidity account.\n if rec.payment_type == 'transfer':\n transfer_credit_aml = move.line_ids.filtered(lambda r: r.account_id == rec.company_id.transfer_account_id)\n transfer_debit_aml = rec._create_transfer_entry(amount)\n (transfer_credit_aml + transfer_debit_aml).reconcile()\n persist_move_name += self._get_move_name_transfer_separator() + transfer_debit_aml.move_id.name\n\n rec.write({'state': 'posted', 'move_name': persist_move_name})\n return True", "def action_create_invoice(self):\n for service in self:\n if service.amount <= 0.0:\n msg = _(\n \"You can not create service invoice without amount!!\"\n \"Please add Service amount first !!\"\n )\n raise ValidationError(msg)\n\n deposit_inv_ids = self.env[\"account.move\"].search(\n [\n (\"vehicle_service_id\", \"=\", service.id),\n (\"move_type\", \"=\", \"out_invoice\"),\n (\"state\", \"in\", [\"draft\", \"open\", \"in_payment\"]),\n ]\n )\n if deposit_inv_ids:\n msg = _(\n \"Deposit invoice is already Pending\\n\"\n \"Please proceed that deposit invoice first\"\n )\n raise UserError(msg)\n\n if not service.purchaser_id:\n msg = _(\n \"Please configure Driver from vehicle or in \" \"a service order!!\"\n )\n raise UserError(msg)\n\n inv_ser_line = [\n (\n 0,\n 0,\n {\n \"name\": ustr(\n service.service_type_id and service.service_type_id.name\n )\n + \" - Service Cost\",\n \"price_unit\": service.amount,\n \"account_id\": service.vehicle_id\n and service.vehicle_id.income_acc_id\n and service.vehicle_id.income_acc_id.id\n or False,\n },\n )\n ]\n for line in service.parts_ids:\n inv_line_values = {\n \"product_id\": line.product_id and line.product_id.id or False,\n \"name\": line.product_id and line.product_id.name or \"\",\n \"price_unit\": line.price_unit or 0.00,\n \"quantity\": line.qty,\n \"account_id\": service.vehicle_id\n and service.vehicle_id.income_acc_id\n and service.vehicle_id.income_acc_id.id\n or False,\n }\n inv_ser_line.append((0, 0, inv_line_values))\n inv_values = {\n \"partner_id\": service.purchaser_id and service.purchaser_id.id or False,\n \"move_type\": \"out_invoice\",\n \"invoice_date\": service.date_open,\n \"invoice_date_due\": service.date_complete,\n \"invoice_line_ids\": inv_ser_line,\n \"vehicle_service_id\": service.id,\n \"is_invoice_receive\": True,\n }\n self.env[\"account.move\"].create(inv_values)", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context=context)\n invoice_vals.update({'partner_bank_id': order.partner_bank_id.id})\n return invoice_vals", "def update_partner_sales_person_in_account_invoice_lines(self):\n \"\"\"Due to the studio fields that reference is missing need to fix.\"\"\"\n inv_line_obj = self.env['account.invoice.line']\n inv_lines = inv_line_obj.search([\n ('x_studio_partner_salesperson', '=', False),\n ('invoice_id', '!=', False),\n ('invoice_id.partner_id', '!=', False)])\n for inv_line in inv_lines:\n if inv_line.invoice_id.partner_id.user_id:\n inv_line.write({\n 'x_studio_partner_salesperson':\n inv_line.invoice_id.partner_id.user_id.id\n })\n if not inv_line.invoice_id.partner_id.user_id and \\\n inv_line.invoice_id.partner_id.parent_id and \\\n inv_line.invoice_id.partner_id.parent_id.user_id:\n inv_line.write({\n 'x_studio_partner_salesperson':\n inv_line.invoice_id.partner_id.parent_id.user_id.id\n })", "def _prepare_invoice(self):\n self.ensure_one()\n move_type = self._context.get('default_move_type', 'in_invoice')\n journal = self.env['account.move'].with_context(default_move_type=move_type)._get_default_journal()\n if not journal:\n raise UserError(_('Please define an accounting purchase journal for the company %s (%s).') % (self.company_id.name, self.company_id.id))\n\n partner_invoice_id = self.partner_id.address_get(['invoice'])['invoice']\n invoice_vals = {\n 'ref': self.partner_ref or '',\n 'move_type': move_type,\n 'narration': self.notes,\n 'currency_id': self.currency_id.id,\n 'invoice_user_id': self.user_id and self.user_id.id,\n 'journal_id': self.picking_type_id.warehouse_id.journal_id_supplier.id,\n 'partner_id': partner_invoice_id,\n 'fiscal_position_id': (self.fiscal_position_id or self.fiscal_position_id.get_fiscal_position(partner_invoice_id)).id,\n 'payment_reference': self.partner_ref or '',\n 'partner_bank_id': self.partner_id.bank_ids[:1].id,\n 'invoice_origin': self.name,\n 'invoice_payment_term_id': self.payment_term_id.id,\n 'invoice_line_ids': [],\n 'company_id': self.company_id.id,\n 'xml_edi': self.xml_edi,\n 'file_name': self.file_name,\n }\n return invoice_vals", "def post(self):\n with_advance = self.filtered(lambda r: r.type == 'out_invoice' and\n r._l10n_mx_edi_get_advance_uuid_related())\n if not with_advance:\n return super(AccountMove, self).post()\n res = super(AccountMove, self.with_context(\n disable_after_commit=True)).post()\n for inv in with_advance:\n adv_amount, _partial_amount, _lines, _reverse_lines, _partial_line = inv._l10_mx_edi_prepare_advance_refund_fields() # noqa\n if not adv_amount:\n inv.message_post(body=_(\n '<p>The credit note was not created because the advance '\n 'was used in another invoice or it is not in this '\n 'system.</p>'\n '<p>So please, follow one of these actions:</p>'\n '<li>Cancel this invoice and remove the related advance.'\n '</li><li>Create the credit note manually.</li>'))\n continue\n refund = self.env['account.move.reversal'].with_context(\n active_ids=inv.ids, active_model='account.move').create({\n 'refund_method': 'cancel',\n 'reason': 'Aplicación de anticipos',\n 'date': inv.invoice_date, })\n refund = refund.reverse_moves()\n reverse_entries = self.search(\n [('reversed_entry_id', '=', self.id)])\n inv.message_post_with_view(\n 'l10n_mx_edi_advance.l10n_mx_edi_message_advance_refund',\n values={'self': inv, 'origin': reverse_entries},\n subtype_id=self.env.ref('mail.mt_note').id)\n return res", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def test_save_agency_invoice_draft_then_amount(self):\n test_currency = CURRENCY_CUC\n test_date = timezone.now()\n test_amount1 = 100\n test_status = STATUS_DRAFT\n\n agency_invoice = AgencyInvoice(\n agency=self.test_agency,\n date=test_date,\n currency=test_currency,\n amount=test_amount1,\n status=test_status)\n\n agency_invoice = FinanceServices.save_agency_invoice(\n user=self.test_user,\n agency_invoice=agency_invoice)\n\n # document data auto filled\n self.assertDocument(agency_invoice, DOC_TYPE_AGENCY_INVOICE, test_currency)\n\n # one finantial history created\n finantials = agency_invoice.finantialdocumenthistory_set\n self.assertEqual(finantials.count(), 1)\n\n # finantial history info\n finantial = finantials.first()\n self.assertFinantialHistory(\n test_finantial_history=finantial, test_document=agency_invoice, test_user=self.test_user,\n test_old_status=None, test_new_status=test_status)\n\n test_amount2 = 50\n\n agency_invoice.amount = test_amount2\n\n agency_invoice = FinanceServices.save_agency_invoice(\n user=self.test_user,\n agency_invoice=agency_invoice)\n\n # document data auto filled\n self.assertDocument(agency_invoice, DOC_TYPE_AGENCY_INVOICE, test_currency)\n\n # no aditional finantial history\n finantials = agency_invoice.finantialdocumenthistory_set\n self.assertEqual(finantials.count(), 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Return the standard path to the shared area on the current platform.
def shared_area_path() -> str: try: return os.environ["OITG_SHARED_AREA"] except KeyError: pass if os.name == "nt": # Windows return "Z:\\" if os.name == "unix" or os.name == "posix": # Linux / OSX / ... return os.path.expanduser("~/steaneShared/") raise Exception("Unknown OS")
[ "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def get_root_path():\n\n return \"\" if PLATFORM == \"windows\" else \"/\"", "def path_extern_mounts(self) -> PurePath:\n return self.path_extern_supervisor / MOUNTS_FOLDER", "def setting_platform_workdir ():\n homeFolder = ''\n folderSymbol = ''\n # linux\n if os.name == 'posix':\n homeFolder = '/home/neod-anderjon/Pictures/Crawler/'\n folderSymbol = '/'\n # windows\n elif os.name == 'nt':\n homeFolder = 'E:\\\\Workstation_Files\\\\Pictures\\\\Comic\\\\IllustratorDesign\\\\Crawler\\\\'\n folderSymbol = '\\\\'\n else:\n pass\n\n return homeFolder, folderSymbol", "def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))", "def _getSocketPath():\n return f'{CmdSsh._getSshDir()}/soos-%r@%h-%p'", "def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")", "def path(cls):\n from os.path import sep, join, exists\n from os import environ\n return join(environ.get(\"SystemRoot\", join(\"C:\", sep, \"Windows\")), \"System32\", \"mpclaim.exe\")", "def get_box_pathway():\n import os\n import sys\n sys.dont_write_bytecode = True\n user_env = os.environ['USERPROFILE']\n os.chdir(user_env)\n directory_list = os.listdir(user_env)\n Box_boolean = 'Box' in directory_list\n Box_Sync_boolean = 'Box Sync' in directory_list\n if Box_boolean is False and Box_Sync_boolean is False:\n raise ValueError('Box or Box Sync is not in your pathway')\n elif Box_boolean is True and Box_Sync_boolean is True:\n raise ValueError('Program does not know whether to distinguish Box or Box Sync')\n else:\n if Box_boolean is True:\n return user_env + '\\Box\\\\'\n elif Box_Sync_boolean is True:\n return user_env + '\\Box Sync\\\\'", "def getBuildRoot(self):\n\n return self.hostOsPath(os.getcwd())", "def pathToBaseNanoporeDir():\n import marginAlign\n i = absSymPath(__file__)\n return os.path.split(os.path.split(os.path.split(i)[0])[0])[0]", "def get_home_path(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHomePath', self.handle)", "def windows_path(self):\n return self._windows_path.replace(\"\\\\\", \"/\")", "def _get_device_base(self):\n site_path = self.config.get('site_path')\n if not site_path:\n return None\n clean_mac = self.target_mac.replace(':', '')\n dev_path = os.path.abspath(os.path.join(site_path, 'mac_addrs', clean_mac))\n if not os.path.isdir(dev_path):\n self._create_device_dir(dev_path)\n return dev_path", "def _get_lsp_primary_path(self):\n return self.__lsp_primary_path", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def relative_mount_path(self) -> str:\n return pulumi.get(self, \"relative_mount_path\")", "def os_path(self, **kw):\n with_drive = kw.get(\"with_drive\", True)\n if os.name == \"nt\":\n return self.windows_path(with_drive=with_drive)\n return self.posix_path(with_drive=with_drive)", "def mount_path(self) -> str:\n return pulumi.get(self, \"mount_path\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path to the given users analysis directory on the shared area (``/Users//analysis``).
def analysis_root_path(user: Optional[str] = None) -> str: if user is None: user = _get_user() return os.path.join(shared_area_path(), "Users", user, "analysis")
[ "def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str:\n if day is None:\n day = date.today().isoformat()\n if user is None:\n user = _get_user()\n path = os.path.join(analysis_root_path(user=user), day)\n\n if not os.access(path, os.R_OK):\n # If the dir does not exist, create it\n os.mkdir(path)\n\n return path", "def get_mobileinsight_analysis_path():\n\n mobileinsight_path = get_mobileinsight_path()\n\n if not mobileinsight_path:\n return None\n\n return os.path.join(mobileinsight_path, \"analysis\")", "def af_user_directory():\n return os.path.abspath(os.path.join(os.path.expanduser('~'), 'af'))", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def getAnalysisDir(path):\n\n if not op.isdir(path):\n path = op.dirname(path)\n\n while path not in (op.sep, ''):\n if isMelodicDir(path):\n return path\n path = op.dirname(path)\n\n return None", "def get_user_path(user, exact=False):\n FM_ROOT = settings.FILE_MANAGER_ROOT\n if user.is_superuser and not exact:\n return FM_ROOT\n else:\n return os.path.join(FM_ROOT, user.username)", "def getUserExportDirectory():\n\n\tworkspace = cmds.workspace(q=True, rd=True)\n\tuser = os.environ[\"USER\"]\n\treturn os.path.join(workspace, EXPORT_DIRECTORY.replace(USER_HOOK, user))", "def get_user_settings_dir():\n settings_dir = os.environ.get(\"JUPYTERLAB_SETTINGS_DIR\")\n settings_dir = settings_dir or pjoin(jupyter_config_dir(), \"lab\", \"user-settings\")\n return osp.abspath(settings_dir)", "def determine_userpath(binary: Path) -> Path:\n user = Path.home() / 'AppData'\n user = user / 'Local'\n if '.research' in binary.name:\n user = user / 'BeamNG.research'\n elif '.tech' in binary.name:\n user = user / 'BeamNG.tech'\n else:\n user = user / 'BeamNG.drive'\n logger.debug(f'Userpath is set to {user.as_posix()}')\n return user", "def get_users_dir(self):\n return os.path.abspath(self._datadict[Environment.USERS_DIR])", "def analysis_path(\n project: str, location: str, conversation: str, analysis: str,\n ) -> str:\n return \"projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}\".format(\n project=project,\n location=location,\n conversation=conversation,\n analysis=analysis,\n )", "def microsalt_qc_pass_run_dir_path(\n microsalt_qc_pass_lims_project: str, microsalt_analysis_dir: Path\n) -> Path:\n return Path(microsalt_analysis_dir, microsalt_qc_pass_lims_project)", "def getFSUserDir(self):\n if not self.authorised:\n raise AuthError(401,\"I am sorry, but you are not authorised\")\n\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"screenName\"]:\n fsDir = self.config.get(\"FileMan\",\"homedir\") + self.authJson[\"userInfo\"][\"screenName\"]\n return fsDir\n else: \n raise AuthError(500, \"Cannot determine the working directory - Liferay did not provide user's screenName\")", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def userFolder():\n #path=os.path.abspath(tempfile.gettempdir()+\"/swhlab/\")\n #don't use tempdir! it will get deleted easily.\n path=os.path.expanduser(\"~\")+\"/.swhlab/\" # works on windows or linux\n # for me, path=r\"C:\\Users\\swharden\\.swhlab\"\n if not os.path.exists(path):\n print(\"creating\",path)\n os.mkdir(path)\n return os.path.abspath(path)", "def shared_area_path() -> str:\n\n try:\n return os.environ[\"OITG_SHARED_AREA\"]\n except KeyError:\n pass\n\n if os.name == \"nt\": # Windows\n return \"Z:\\\\\"\n if os.name == \"unix\" or os.name == \"posix\": # Linux / OSX / ...\n return os.path.expanduser(\"~/steaneShared/\")\n raise Exception(\"Unknown OS\")", "def reporting_app_data(self):\n return os.path.abspath('reporting_app_data')", "def get_user_data_path():\n current_directory = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(current_directory, 'emergency_fund_info.json')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path to the analysis directory for the given day, defaulting to today. The analysis directory is intended to be used as working space for analysing data while it is taken, so that the code can easily be found again later if the data or conclusions reached are reexamined. If the directory does not exist, it is created.
def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str: if day is None: day = date.today().isoformat() if user is None: user = _get_user() path = os.path.join(analysis_root_path(user=user), day) if not os.access(path, os.R_OK): # If the dir does not exist, create it os.mkdir(path) return path
[ "def set_up_directory(day):\n this_dir = os.path.dirname(__file__)\n new_dir = os.path.join(this_dir, 'day' + str(day))\n with contextlib.suppress(FileExistsError):\n os.mkdir(new_dir)\n new_file_name = os.path.join(new_dir, 'day' + str(day) + '.py')\n template_file_name = os.path.join(this_dir, 'template.py')\n if not(os.path.exists(new_file_name)):\n shutil.copy(template_file_name, new_file_name)\n return new_dir", "def get_day_data_path(self, days_ago=0):\n home = os.environ.get('USERPROFILE').replace('\\\\', '/')\n self.data_dir= os.path.join(home, 'TimeData')\n if not os.path.isdir(self.data_dir):\n mkdir(self.data_dir)\n today_filename = os.path.join(\n self.data_dir,\n (datetime.now()-timedelta(days=days_ago)).strftime('%Y-%m-%d.json'))\n return today_filename", "def get_datedir_path(self, date):\n return os.path.join(str(date.year), str(date.month), str(date.day))", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path", "def _get_new_measurement_path() -> pathlib.Path:\n today = strftime(\"%Y%m%d\")\n today_path = DATA_DIR / today\n new_path = get_unique_path(today_path, 'measurement_{:03d}')\n return new_path", "def getAnalysisDir(path):\n\n if not op.isdir(path):\n path = op.dirname(path)\n\n while path not in (op.sep, ''):\n if isMelodicDir(path):\n return path\n path = op.dirname(path)\n\n return None", "def journal_today():\n today = pendulum.today()\n LOG.debug('Today: %s', today)\n\n path = PATH_FORMAT.format(year=today.year, month=today.month, date=today.to_date_string())\n path = pathlib.Path(path).expanduser()\n LOG.debug('Path of today: %s', path)\n\n parent = path.parent\n if not parent.exists():\n parent.mkdir(parents=True, exist_ok=True)\n LOG.debug('Created new path: %s', parent)\n\n if not path.exists():\n shutil.copyfile(TEMPLATE_PATH, path)\n\n return path", "def prepare_folder(self) -> str:\n base_folder = self.config['info']['folder']\n today_folder = f'{datetime.today():%Y-%m-%d}'\n folder = os.path.join(base_folder, today_folder)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n return folder", "def make_folder_for_today(log_dir):\n now = datetime.datetime.now()\n sub_folders_list = ['{0:04d}'.format(now.year),\n '{0:02d}'.format(now.month),\n '{0:02d}'.format(now.day)]\n folder = log_dir\n for sf in sub_folders_list:\n folder = os.path.join(folder, sf)\n if not os.path.exists(folder):\n os.makedirs(folder)\n return folder", "def makedir(dirpath=None):\n\n # if no path is given, pick one:\n if dirpath is None:\n now = datetime.datetime.now()\n dirname = now.strftime(\"%Y_%b_%d_%Hh%M\")\n dirpath = os.path.join(\"results\", dirname)\n\n # if no such directory exists, create:\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n \n return dirpath", "def _check_or_create_dir(self):\n # update today_dir\n date_now = datetime.now().strftime(self.cfg['dir_format'])\n self.today_dir = os.path.join(self.cfg['working_directory'], date_now)\n if not os.path.isdir(self.today_dir):\n for d in self.cfg['file_types'].keys():\n os.path.os.makedirs(os.path.join(self.today_dir, d))\n self._clear_empty_folders()", "def archive_day(self, day):\n xml_cleanup = self.get_config_settings('xml_cleanup') if isinstance(self.get_config_settings('xml_cleanup'), bool) else False\n if xml_cleanup is True:\n path = os.path.abspath(self.download_filepath + '/../' + day)\n if os.path.isdir(path):\n self.logger.info('Target path is an existing directory, archiving and removing: ' + path)\n try:\n self.archive_folder(path, day)\n self.logger.info('Successfully archived: ' + path)\n except:\n self.logger.warning('Error while trying to archive path: ' + path)\n\n try:\n shutil.rmtree(path)\n self.logger.info('Successfully deleted: ' + path)\n except:\n self.logger.warning('Error while trying to delete path: ' + path)\n else:\n self.logger.info('Target path is not an existing directory, nothing to archive: ' + path)\n else:\n self.logger.info('XML cleanup is disabled in the settings')", "def get_filtered_data_path(scans_data_dir, date=None):\n if date:\n if os.path.exists:\n return os.path.join(scans_data_dir, date, 'filtered'), date\n else:\n raise ValueError(\"Filtered data requested for {} but file does not exist at {}\".format(\n date, scans_data_dir))\n else:\n dir_list = get_dirs(scans_data_dir)\n if len(dir_list) == 0:\n print(\"No data found in {}. Please run scanner first\".format(scans_data_dir))\n else:\n date = dir_list[0]\n return os.path.join(scans_data_dir, date, 'filtered'), date", "def simdir(night='', mkdir=False):\n dirname = os.path.join(os.getenv('DESI_SPECTRO_SIM'), os.getenv('PIXPROD'), night) \n if mkdir and not os.path.exists(dirname):\n os.makedirs(dirname)\n \n return dirname", "def _get_directory(self):\n directory = os.environ.get(\"EEMETER_WEATHER_CACHE_DIRECTORY\",\n os.path.expanduser('~/.eemeter/cache'))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory", "def day_name():\n file_path = os.path.dirname(__file__)\n day_path = os.path.normpath(os.path.join(file_path, '..'))\n return os.path.basename(day_path)", "def _get_cal_path(instrument):\n # Directory 2 steps up from where code is located\n pkgpath = (os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n + os.path.sep)\n # Join in the instrument name\n caldata = os.path.join(pkgpath, 'calibration',\n 'data', instrument.lower(), '')\n return caldata", "def get_day_folder_path(prefix, year, month, day):\n assert regex_match(r'^(\\d\\d)?\\d\\d$', year), \"%s is not a valid year\" % year\n assert regex_match(r'^(\\d)?\\d$', month), \"%s is not a valid month\" % month\n assert regex_match(r'^(\\d)?\\d$', day), \"%s is not a valid day\" % day\n return \"%s/year=%d/month=%d/day=%d/\" % (prefix, int(year), int(month), int(day))", "def effective_save_dir(parent_archive_dir,stat,date,archtype ='stat'):\n if archtype == '/':\n return parent_archive_dir\n \n out_save_dir = parent_archive_dir\n fff = archtype.split('/')\n year = str(date.year)\n doy = geok.dt2doy(date)\n week, dow = geok.dt2gpstime(date)\n for f in fff:\n out_save_dir = os.path.join(out_save_dir,eval(f))\n return out_save_dir" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path to an experiment's ARTIQ results directory. The standard results path is ``/artiqResults/``.
def artiq_results_path(experiment: Optional[str] = None) -> str: path = os.path.join(shared_area_path(), "artiqResults") if experiment is None: try: experiment = os.environ["OITG_EXPERIMENT"] except KeyError: raise Exception( "No experiment supplied, and no OITG_EXPERIMENT environment key") return os.path.join(path, experiment)
[ "def get_results_dir() -> str:\n return os.path.join(os.getcwd(), RESULTS_DIR)", "def get_results_path(results_path, experiment, prefix, mode):\n if not experiment:\n raise ArgumentError('experiment cannot be empty')\n if not prefix:\n raise ArgumentError('prefix cannot be empty')\n if not prefix:\n raise ArgumentError('mode cannot be empty')\n return Path(results_path) / experiment / prefix / mode", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def get_results_path(self):", "def artifacts_directory(self):\n return self._filesystem.join(self.results_directory(),\n ARTIFACTS_SUB_DIR)", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def results_dir(filename = None):\n path = 'results'\n if os.path.isdir(path):\n if not os.access(path, os.R_OK | os.W_OK):\n raise EnvironmentError(\"{0} is not readable or writable\".format(os.path.abspath(path)))\n return os.path.join(path, filename) if filename else path\n os.mkdir(path) # raises if it fails\n return os.path.join(path, filename) if filename else path", "def _get_R_script_dir(self):\n qiime_dir = get_qiime_project_dir()\n script_dir = path.join(qiime_dir,'qiime','support_files','R')\n return script_dir", "def path(self):\n name = self.setting.system.name + \"-\" + self.setting.system.version + \"-\" + self.setting.name\n return os.path.join(self.project.path(), self.machine.name, \"results\", self.benchmark.name, name)", "def local_results(self):\n\n return self._local_results_path", "def get_sync_results_folder() -> str:\n return os.path.join(get_test_data_folder(), 'sync-results')", "def get_result_path(self):\n return logPath", "def get_qiime_temp_dir():\r\n qiime_config = load_qiime_config()\r\n qiime_config_value = qiime_config['temp_dir']\r\n if qiime_config_value is not None:\r\n result = qiime_config_value\r\n else:\r\n result = '/tmp/'\r\n return result", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def __instruments_absdir(self, institute: str) -> str:\n return self.__institutes_absdir() + institute + \"/instruments/\"", "def get_results_path(study_name, project_path=None, config=None):\n from borneo.config import load_project_config\n project_path = project_path or get_project_path()\n config = config or load_project_config()\n return os.path.join(project_path,\n config.LAYOUT.format(study_name=study_name,\n component=\"results\"))", "def experiment_output_path(cfg_object):\r\n sc_dir, cl_path = read_paths()\r\n\r\n return os.path.join(sc_dir, cfg_object.output_dir)", "def results(results_dir):\n\n if not isinstance(results_dir, str):\n raise TypeError(\"results_dir must be a string!\")\n\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n return results_dir", "def get_rdai_path():\n import sys, os.path\n verbose(\"discovering RDAI root directory from mbx script location\")\n # full path to script itself\n rdai_path = os.path.abspath(sys.argv[0])\n # split \"RDAI\" executable off path\n script_dir, _ = os.path.split(rdai_path)\n # split \"script\" path off executable\n distribution_dir, _ = os.path.split(script_dir)\n verbose(\"RDAI distribution root directory: {}\".format(distribution_dir))\n return distribution_dir" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization): # get # of users and # of items [u_num, i_num] = all_dat.shape # init user factors and item factors with random values u_fac = np.matrix(np.random.rand(u_num, f_num)) # MxF i_fac = np.matrix(np.random.rand(i_num, f_num)) # NxF # calculate the preference matrix preference = cal_preference(all_dat) # calculate the confidence matrix confidence = cal_confidence(all_dat) # recalculate the user factors and item factors using the alternating least square method for itr in range(iterations): u_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization) #print itr, "u_fac" i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization) #print itr, "i_fac" # save the output df = pd.DataFrame(u_fac) df.to_csv("tmp/u_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') df = pd.DataFrame(i_fac.T) df.to_csv("tmp/i_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') # an MxF user factor matrix and an FxN item factor matrix return [u_fac, i_fac.T]
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def get_user_item_matrix(datafile, user_index, product_index):\n num_users = len(user_index)\n num_items = len(product_index)\n result = np.zeros((num_users, num_items))\n num_reviews = len(datafile)\n result_dense = np.zeros((num_reviews, 3))\n for line in datafile.iterrows():\n i = line[0]\n user_id = datafile['user_id'][i]\n product_id = datafile['business_id'][i]\n user = user_index[user_id]\n product = product_index[product_id]\n rating = datafile['stars'][i]\n result[user, product] = rating\n result_dense[i, 0] = user\n result_dense[i, 1] = product\n result_dense[i, 2] = rating\n return result, result_dense", "def optimize_U(R, M, num_users, nf, lam):\n\n U = np.zeros((nf, num_users))\n\n for i in range(num_users):\n Ii = np.where(R[i] != -1)[0] # movies rated by user i\n MIi = M[:,Ii] # columns of M.T (movies) for which user i has given a rating\n nui = len(Ii) # number of movies rated by user i\n E = np.eye(nf) # identify matrix of size nf x nf\n\n Ai = np.dot(MIi, MIi.T) + lam * nui * E\n\n RiIi = R[i,Ii] # ith row vector of R but only with entries from columns rated by user i\n\n Vi = np.dot(MIi, RiIi.T)\n\n U[:,i] = np.dot(np.linalg.inv(Ai), Vi)\n\n return U", "def fit_transform(self, ratings):\n self.fit(ratings)\n predictions = self.user_feats.T.dot(self.item_feats)\n return predictions", "def predict_ratings_matrix_factorization(R, nf=1, lam=0.05, T=10):\n\n num_users, num_movies = R.shape\n\n np.random.seed(0)\n M = np.random.rand(nf, num_movies)\n M[0] = np.mean(R, axis=0)\n\n for _ in trange(T):\n U = optimize_U(R, M, num_users, nf, lam)\n M = optimize_M(R, U, num_movies, nf, lam)\n\n X = np.dot(U.T, M)\n\n return X", "def get_user_feature_matrix_p(user_dict, user_index, aspect_index, N, popularity, A_dense, Polarity):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n max = 0\n min = 1000\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n if Polarity == False:\n count_dict[aspect] += 1\n else:\n count_dict[aspect] += review[1]\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n if count > max:\n max = count\n if count < min:\n min = count\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = (((count - min)/(max - min))*5)\n\n if len(popularity) > 0:\n col = np.zeros((len(result), 1))\n result = np.append(result, col, axis=1)\n for i in range(len(result)):\n items = A_dense[A_dense[:, 0] == i][:, 1]\n items = items.astype(int)\n result[i, len(result[1]) - 1] = np.mean(popularity[items, 1])\n return result", "def nmf_recommender(user):\n user_vector = create_new_user(user, rec_type='NMF')\n user_df = pd.DataFrame(list(user_vector), index=ratings.columns)\n user_df = user_df.T\n model = load_model(package_dir + '/models/NMF_60.sav')\n Q = model.components_\n P = model.transform(user_df)\n prediciton = np.dot(P,Q)\n recommendations = pd.DataFrame(prediciton, columns=ratings.columns)\n final_recs = recommendations[(user_df == 0)].T\n final_recs.columns = ['predicted_rating']\n final_recs= final_recs['predicted_rating'].sort_values(ascending=False)\n return list(movies.loc[final_recs.index]['title'].head(10))", "def fit(self, ratings):\n self.ratings = ratings\n rmse = float('inf')\n diff = rmse\n self.item_feats = self.rand.rand(self.rank * self.ratings.shape[1])\\\n .reshape((self.rank, self.ratings.shape[1]))\n course_avg = self.ratings.sum(0) / (self.ratings != 0).sum(0)\n course_avg[np.isnan(course_avg)] = 0\n self.item_feats[0] = course_avg\n self.user_feats = np.zeros(self.rank * self.ratings.shape[0])\\\n .reshape((self.rank, self.ratings.shape[0]))\n while diff > self.tolerance:\n self.update_users()\n self.update_items()\n true = self.ratings.data\n non_zeros = self.ratings.nonzero()\n pred = np.array(\n [\n self.predict_one(user, item)\n for user, item in zip(non_zeros[0], non_zeros[1])\n ]\n )\n new_rmse = self.root_mean_squared_error(true, pred)\n diff = rmse - new_rmse\n rmse = new_rmse", "def build_prediction_factorization(item_features, user_features, test):\n nnz_row, nnz_col = test.nonzero()\n nnz_test = list(zip(nnz_row, nnz_col))\n for row, col in nnz_test:\n current_item = item_features[:, row]\n current_user = user_features[:, col]\n prediction = current_user.T.dot(current_item)\n prediction = min(5, prediction)\n prediction = max(1, prediction)\n test[row, col] = prediction\n\n return test", "def init_matrix(self, train, num_features):\n user_matrix = np.random.rand(self.num_users, num_features)\n item_matrix = np.random.rand(num_features, self.num_items)\n item_nnz = train.getnnz(axis=0)\n item_sum = train.sum(axis=0)\n item_matrix[0, :] = item_sum / item_nnz\n return user_matrix, item_matrix", "def create_adjust_matrix(self, is_sub: bool):\n matrix = None\n if not is_sub:\n ratings = np.ones_like(self._user, dtype=np.float32)\n matrix = sp.csr_matrix(\n (ratings, (self._user, self._item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n else:\n if self.type == \"ND\":\n drop_user = self.rand_sample(\n self.n_users,\n size=int(self.n_users * self.drop_ratio),\n replace=False,\n )\n drop_item = self.rand_sample(\n self.n_items,\n size=int(self.n_items * self.drop_ratio),\n replace=False,\n )\n R_user = np.ones(self.n_users, dtype=np.float32)\n R_user[drop_user] = 0.0\n R_item = np.ones(self.n_items, dtype=np.float32)\n R_item[drop_item] = 0.0\n R_user = sp.diags(R_user)\n R_item = sp.diags(R_item)\n R_G = sp.csr_matrix(\n (\n np.ones_like(self._user, dtype=np.float32),\n (self._user, self._item),\n ),\n shape=(self.n_users, self.n_items),\n )\n res = R_user.dot(R_G)\n res = res.dot(R_item)\n\n user, item = res.nonzero()\n ratings = res.data\n matrix = sp.csr_matrix(\n (ratings, (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n elif self.type == \"ED\" or self.type == \"RW\":\n keep_item = self.rand_sample(\n len(self._user),\n size=int(len(self._user) * (1 - self.drop_ratio)),\n replace=False,\n )\n user = self._user[keep_item]\n item = self._item[keep_item]\n\n matrix = sp.csr_matrix(\n (np.ones_like(user), (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n matrix = matrix + matrix.T\n D = np.array(matrix.sum(axis=1)) + 1e-7\n D = np.power(D, -0.5).flatten()\n D = sp.diags(D)\n return D.dot(matrix).dot(D)", "def affinity_matrix(test_specs):\n\n np.random.seed(test_specs[\"seed\"])\n\n # uniform probability for the 5 ratings\n s = [(1 - test_specs[\"spars\"]) / test_specs[\"ratings\"]] * test_specs[\"ratings\"]\n s.append(test_specs[\"spars\"])\n P = s[::-1]\n\n # generates the user/item affinity matrix. Ratings are from 1 to 5, with 0s denoting unrated items\n X = np.random.choice(\n test_specs[\"ratings\"] + 1, (test_specs[\"users\"], test_specs[\"items\"]), p=P\n )\n\n Xtr, Xtst = numpy_stratified_split(\n X, ratio=test_specs[\"ratio\"], seed=test_specs[\"seed\"]\n )\n\n return Xtr, Xtst", "def forward(self, user, item):\n item_emb = self.product_factors(item.view(-1)) + self.product_bias(\n item.view(-1)\n )\n user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))\n mat_mult = (item_emb * user_emb).sum(1)\n\n return mat_mult", "def init_mf(train, num_features):\n num_items, num_users = train.shape\n\n user_features = np.random.rand(num_features, num_users) / num_users\n user_features[0, :] = np.ones((num_users,))\n\n item_features = np.random.rand(num_features, num_items) / num_items\n item_features[0, :] = sp.csr_matrix.mean(train, axis=1).reshape(num_items, )\n\n return user_features, item_features", "def get_utility_matrix():\n matrix = np.zeros((USERS, MOVIES))\n with open(\"dataset/ratings.dat\", 'r') as ratings:\n for rating in ratings:\n comp = rating.split('::')\n matrix[int(comp[0]) - 1][int(comp[1]) - 1] = int(comp[2])\n return matrix", "def recommend_NMF(new_user,movies_num,movies_ratings):\n list_id_movies = movies_ratings['movieId'].unique()\n nmf,Q = load_NMF_model()\n new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T\n new_user_vector_filled = new_user_vector.fillna(3)\n #calculate Matrix P (Genres)\n P = nmf.transform(new_user_vector_filled)\n #make predictions\n predictions = np.dot(P,Q)\n recommendations = pd.DataFrame(predictions.reshape(-1), index=list_id_movies).T\n #Remove already watched movies:\n not_watched_movies_mask = np.isnan(new_user_vector)\n not_watched = recommendations[not_watched_movies_mask]\n\n top_movies_ids = not_watched.T.sort_values(by=[0], ascending=False).index[:movies_num]\n\n Top_recommended = movieId_to_title(top_movies_ids,movies_ratings) \n return Top_recommended", "def compute_matrix_uu(self):\r\n users_u = User.objects.all()\r\n # It's a symmetric matrix, we don't need to go through all values\r\n for uu in range(0, len(users_u)):\r\n u = users_u[uu]\r\n for vv in range(uu, len(users_u)):\r\n v = users_u[vv]\r\n if u.id == v.id:\r\n self.uu[u, v] = Recommendation.EMPTY_CASE\r\n else:\r\n x = []\r\n for f in self.p.get_cols():\r\n if not Recommendation.compare_float(self.p[u, f], 0.0) and self.p[u, f] > 0.0 and not Recommendation.compare_float(self.p[v, f], 0.0) and self.p[v, f] > 0.0:\r\n x.append(f)\r\n if len(x) > 0:\r\n nominator = 0.0\r\n wu2 = 0.0\r\n wv2 = 0.0\r\n for f in x:\r\n if not Recommendation.compare_float(self.w[u, f], Recommendation.EMPTY_CASE) and not Recommendation.compare_float(self.w[v, f], Recommendation.EMPTY_CASE):\r\n nominator += self.w[u, f]*self.w[v, f]\r\n wu2 += self.w[u, f]*self.w[u, f]\r\n wv2 += self.w[v, f]*self.w[v, f]\r\n denominator = sqrt(wu2)*sqrt(wv2)\r\n self.uu[u, v] = self.uu[v, u] = nominator/denominator if not Recommendation.compare_float(denominator, 0.0) else Recommendation.EMPTY_CASE", "def create_matrix_iuf(self):\r\n len_users = float(len(self.r.get_rows()))\r\n for f in self.uf.get_rows():\r\n self.iuf[f] = log10(len_users/float(self.uf[f])) if self.uf[f] != 0 else 0.0", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of Domains for this API key.
def get_domains() -> List[str]: ret = _call_endpoint("v1/domains") # Example response: # [{'createdAt': '2016-06-25T03:08:44.000Z', # 'domain': 'mydomain.com', # 'domainId': 12345678, # 'expirationProtected': False, # 'expires': '2020-06-25T03:08:44.000Z', # 'holdRegistrar': False, # 'locked': True, # 'nameServers': None, # 'privacy': False, # 'renewAuto': True, # 'renewDeadline': '2020-08-09T03:08:44.000Z', # 'renewable': True, # 'status': 'ACTIVE', # 'transferProtected': False},] domains = [d["domain"] for d in ret] return domains
[ "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def get_domains(self):\n return self.rest_helper(\"/domains.json\")", "def domain_list(self) -> 'outputs.GetReposRepoDomainListResult':\n return pulumi.get(self, \"domain_list\")", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def get_domains(self):\n headers = {\"Accept\": \"application/json\"}\n\n return self._http_request(method='GET',\n url_suffix=\"/config-gtm/v1/domains\",\n headers=headers)", "def get_domain_list(self):\n\t\tnow = datetime.datetime.now()\n\t\texp_from = \"%04d-%02d-%02d\" % (now.year-1, now.month, now.day)\n\t\texp_to = \"%04d-%02d-%02d\" % (now.year+15, now.month, now.day)\n\t\tdomain_list = []\n\t\tfor extension in ['.com', '.au']:\n\t\t\tresult = self.query('get_domains_by_expiredate', 'domain', { 'exp_from' : exp_from, 'exp_to' : exp_to, 'limit' : 100000, 'page' : 1, 'domain': extension})\n\t\t\t[ domain_list.append(x['name']) for x in result['attributes']['exp_domains'] ]\n\t\treturn domain_list", "def domains(self):\n return DomainCollection(self.request)", "def domains(client):\n return client.domain.all()", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def domains(self):\n return self._domains", "def getDomains(self, company):\n return self.db.getDomains(company)", "def list_zones(self, **kwargs):\n return self.client['Account'].getDomains(**kwargs)", "def get_all_domains(self):\n sql_string = \"select DomainAcc from PFAM\"\n dalObj = DAL(self.db_name, sql_string)\n results = dalObj.executeSelect()\n return results", "def domain(self):\n return self.keys()", "def list_domain_names():\n pass", "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def get_domains():\n oc = ObjectContainer()\n for domain in sorted(domain_list):\n url = 'http://www.reddit.com/domain/%s/.json' % domain\n title = 'domain/' + domain\n oc.add(DirectoryObject(key=Callback(videos,\n url=url,\n title=title),\n title=title))\n return oc", "def get_subdomains(self):\n\n response = self.call(method='getSubdomains', args=[self.domainname])\n subdomains = []\n for s in response:\n subdomain = self.subdomain(domain=self.domainname, subdomain=s)\n subdomains.append(subdomain)\n return subdomains", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetSearchDomains', self.handle))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get DNS entries for a specific domain
def get_domain_dns_records(domain): url_suffix = "v1/domains/{}/records".format(domain) ret = _call_endpoint(url_suffix) if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN": # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'} raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}") return ret
[ "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def get_dns_records(self, d):\n try:\n host = d['host']\n except KeyError:\n return { 'status': 'Error', 'error_message': 'Key host is required'} \n \n return requests.get(\"https://api.xforce.ibmcloud.com/resolve/%s\" % host, headers={\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': 'Basic %s' % self.token.decode()\n }).json()", "def lookup_domain(self, domain, nameserver=None, log_prefix = ''):\n # get the resolver to use\n if nameserver is None:\n nameserver = self.nameservers[0]\n results = {'domain': domain, 'nameserver': nameserver}\n # construct the socket to use\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(self.timeout)\n\n logging.debug(\"%sQuerying DNS enteries for \"\n \"%s (nameserver: %s).\" % (log_prefix, domain, nameserver))\n\n # construct and send the request\n request = dns.message.make_query(domain,\n dns.rdatatype.from_text(self.rtype))\n results['request'] = b64encode(request.to_wire())\n sock.sendto(request.to_wire(), (nameserver, 53))\n\n # read the first response from the socket\n reads, _, _ = select.select([sock], [], [], self.timeout)\n # if we didn't get anything, then set the results to nothing\n if reads == []:\n results['response1'] = None\n self.results[domain] = results\n return results\n response = reads[0].recvfrom(4096)[0]\n results['response1'] = b64encode(response)\n resp = dns.message.from_wire(response)\n results['response1-ips'] = self.parse_out_ips(resp)\n\n # if we have made it this far, then wait for the next response\n reads, _, _ = select.select([sock], [], [], self.timeout)\n # if we didn't get anything, then set the results to nothing\n if reads == []:\n results['response2'] = None\n self.results[domain] = results\n return results\n response = reads[0].recvfrom(4096)[0]\n results['response2'] = b64encode(response)\n resp = dns.message.from_wire(response)\n results['response2-ips'] = self.parse_out_ips(resp)\n self.results[domain] = results\n return results", "def dnscl_domain(domain_name):\n start_time = timeit.default_timer()\n ip_dict = defaultdict(int)\n domain_list = []\n line_count = 0\n\n with open(FILENAME, encoding=\"UTF-8\") as piholelog:\n for line in piholelog:\n field_index = 0\n if \"query[\" in line:\n fields = line.strip().split(\" \")\n domain_name_field = find_field(fields, field_index, \"domain\")\n ip_address = find_field(fields, field_index, \"ip_address\")\n if re.search(domain_name, domain_name_field, re.IGNORECASE):\n ip_dict[ip_address] += 1\n if domain_name:\n domain_list.append(domain_name_field)\n line_count += 1\n\n ip_list_sorted = sort_dict(ip_dict)\n elapsed_time = timeit.default_timer() - start_time\n\n print(f\"{domain_name} total queries: {line_count}\")\n print(\"ip addresses: \")\n\n for ip_address, query_count in ip_list_sorted:\n print(f\"{query_count}\\t {ip_address}\")\n\n if domain_name:\n print(\"\\ndomain names: \")\n for domain_names_found in sorted(set(domain_list)):\n print(domain_names_found)\n print(\n f\"\\nSummary: Searched {domain_name} and found {line_count}\",\n f\"queries for {len(set(domain_list))} domain names from {len(ip_dict)} clients.\",\n )\n else:\n print(\n f\"\\nSummary: Searched {domain_name} and found {line_count}\",\n f\"queries from {len(ip_dict)} clients.\",\n )\n print(f\"Query time: {round(elapsed_time, 2)} seconds\")", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. improve?\n time.sleep(2)", "def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))", "def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())", "def infoDnsRecords(self, domainname: str) -> DNSRecordSet:\n response = self._send(self.nc_request(action=\"infoDnsRecords\", parameters={\"domainname\": domainname}))\n\n # build records\n rset = DNSRecordSet(dnsrecords=[])\n for r in response[\"dnsrecords\"]:\n dr = DNSRecord(id=int(r[\"id\"]),\n hostname=r[\"hostname\"],\n type=r[\"type\"],\n priority=int(r[\"priority\"]),\n destination=r[\"destination\"],\n deleterecord=r[\"deleterecord\"],\n state=r[\"state\"])\n\n rset.dnsrecords.append(dr)\n\n return rset", "def get(self, domain):\n return self.getHttp('domains/{:s}'.format(domain))", "def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def get_domain_records(domain, server):\n this_domain_id = get_domain_id(domain, server)\n domain_records_query = 'SELECT * FROM records where domain_id=%s' % this_domain_id\n domain_records = perform_query(server.name, server.port, server.user, server.password, server.database, domain_records_query, 'query')\n return domain_records", "def get_domains():\n global domain_q\n\n fetch_data = mongo_conn.mongo_read(collection_name,{'visit_times':0}, {'domain':True,'_id':False,'visit_times':True},limit_num=None)\n for item in fetch_data:\n domain_q.put(item['domain'])", "def _get_dns_search_domain():\n # _clear()\n resolv_file = '/etc/resolv.conf'\n search_domain = 'whocares'\n with open(resolv_file, 'r') as rf:\n for rf_line in rf:\n if 'search' in rf_line:\n search_domain = rf_line.split()[1]\n break\n return search_domain", "def list_dns():\n with ShecanConfig() as conf:\n ips = conf.list_dns()\n return ips", "def getIPs(self, domain = \"localhost\"):\n # convert 'domain' to string, in case of erroneous type being passed\n domain = str(domain)\n\n # Kind warning for those who entered an IP address instead of a domain\n try: \n inet_aton(domain)\n print(\"Warning: an IP address was given instead of a domain name.\")\n except:\n pass\n\n # Try to query DNS records to populate A-Record IP list\n # Prints errors and returns None if exceptions found\n try:\n iplist = gethost(domain)[2]\n except gaierror as ge:\n if ge.errno == -2:\n print(\"Error: Domain '{}' invalid, or unknown. \"\\\n \"Please check proper spelling and format.\\n\"\\\n \"(e.g.: python dns_get_A_record_IPs.py google.com )\".format(domain))\n elif ge.errno == -3:\n print(\"Error: Domain '{}' unreachable. Please check your connection.\".format(domain))\n return None\n except timeout:\n print(\"Error: Connection to {} timed out.\".format(domain))\n return None\n\n return iplist", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def list_domain_names():\n pass", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print each domain and its DNS records (for domains linked to this API key).
def print_all_dns_records(): for domain in sorted(get_domains()): dns_records = get_domain_dns_records(domain) print(domain) pprint(dns_records) print("*" * 50) # TODO: poor man's rate limiter. improve? time.sleep(2)
[ "def print_all_domain_lists(self):\n for list in self.domainCollection:\n print(list.url_identifier)", "def display_elb_dns_entries():\n stack_name = get_stack_name()\n elb = get_connection(ELB)\n elb_dns_list = elb.list_domain_names(stack_name)\n for elb_dns in elb_dns_list:\n print \"\\n\\nELB name: {0} DNS: {1}\".format(elb_dns['elb_name'], elb_dns['dns_name'])", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def get_dns_records(self, d):\n try:\n host = d['host']\n except KeyError:\n return { 'status': 'Error', 'error_message': 'Key host is required'} \n \n return requests.get(\"https://api.xforce.ibmcloud.com/resolve/%s\" % host, headers={\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': 'Basic %s' % self.token.decode()\n }).json()", "def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret", "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def get_all_records(self, domainname=\"quixey.be\"):\n\tall_records = []\n\tparams = {'Action': 'DescribeDomainRecords', \\\n\t 'DomainName': domainname, \\\n\t\t }\n\n\tfor resp in self.get(params, paginated=True):\n\t\tfor item in resp['DomainRecords']['Record']:\n\t\t\tall_records.append(item)\n\treturn all_records", "def print_all_servers_known_by_dns(dbr):\n nb_servers_known_by_DNS = dbr.getServers()\n print((\"There are {0} servers known by the DNS\".format(nb_servers_known_by_DNS)))\n for i in range(nb_servers_known_by_DNS):\n # getNextServer().next() returns a tuple\n server_tuple = next(dbr.getNextServer())\n print(\n \"Server {0}, name = {1}, Node name = {2}\".format(\n i + 1, server_tuple[0], server_tuple[1]\n ))\n print(\"\")", "def test_getdnsrecords(self, kasserver):\n assert kasserver.get_dns_records(\"example.com\") == self.RESPONSE_PARSED", "def list_stub_domains( args ):\n kube_dns = dump_kube_dns_cm( args.kube_dns_cm_file )\n if 'data' not in kube_dns or 'stubDomains' not in kube_dns['data']:\n return\n\n stubDomains = json.loads( kube_dns['data']['stubDomains'] )\n for domain in stubDomains:\n print \"%s:%s\" % (domain, \",\".join( stubDomains[domain] ) )", "def list_domain_names():\n pass", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def print_all_services_known_by_dns(dbr):\n nb_services_known_by_DNS = dbr.getServices(\"*\")\n print((\"There are {0} services known by the DNS\".format(nb_services_known_by_DNS)))\n for i in range(nb_services_known_by_DNS):\n # getNextService().next() returns a tuple\n service_tuple = next(dbr.getNextService())\n print(\n \"Service {0} : Type of service = {1} name = {2}, format = {3}\".format(\n i + 1, service_tuple[0], service_tuple[1], service_tuple[2]\n ))\n print(\"\")", "def list_type_A_domain(self, domain):\n r53 = self.connections.get_route53()\n # Get Zone ID\n zone = r53.get_zone(domain)\n zone_id = zone.id\n # Get all type A records\n records = r53.get_all_rrsets(hosted_zone_id=zone_id, name='A')\n for record in records:\n print(record)", "def get_domains(self):\n return self.rest_helper(\"/domains.json\")", "def get_domains():\n oc = ObjectContainer()\n for domain in sorted(domain_list):\n url = 'http://www.reddit.com/domain/%s/.json' % domain\n title = 'domain/' + domain\n oc.add(DirectoryObject(key=Callback(videos,\n url=url,\n title=title),\n title=title))\n return oc", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def showDomainname(logger):\n rc = Command.execute(logger, \"dns\", \n [DnsDomainName.DNS_DOMAIN_NAME_COMMAND_NAME]) \n return rc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a request handler class that redirects to supplied `url`
def redirect_handler_factory(): class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): self.send_response(301) domain = self.headers['host'] if ':' in domain: domain = domain.split(':')[0] self.send_header('Location', "https://" + domain + self.path) self.end_headers() return RedirectHandler
[ "def redirect_handler_factory(url):\n class RedirectHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(302)\n self.send_header('Location', url)\n self.end_headers()\n\n return RedirectHandler", "def redirect(url):", "def redirect(self, url):\n response = getattr(self.request, 'response', None)\n if response is None:\n response = HTTPFound(location=url)\n else:\n response = HTTPFound(location=url, headers=response.headers)\n return response", "def redirect(self, url):\n raise RequestRedirect(url)", "def route(routeURL):\n\tdef wrapper(HTTPObject):\n\t\tglobal urls\n\t\turls += (routeURL, HTTPObject.__name__,)\n\t\treturn HTTPObject\n\treturn wrapper", "def redirect_client(requested_uri):\n return redirect(requested_uri['value'], 301)", "def _req_wrap(self, url):\r\n\r\n self.requester.url_list = [url]\r\n self.requester.responses = []\r\n self.requester.run()\r\n\r\n return self.requester.responses[0]", "def _make_ssh_forward_handler_class(self, remote_address_):\n class Handler(_ForwardHandler):\n remote_address = remote_address_\n ssh_transport = self._transport\n logger = self.logger\n return Handler", "def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)", "def get_redirect_handler_for_site(site, request):\n\n view = queryMultiAdapter((site, request), name=\"redirect_handler\")\n if view:\n return view\n\n # Check if we have a redirect handler script in the site root\n if \"redirect_handler\" in site:\n return site[\"redirect_handler\"]\n\n return None", "def create_addon_instance(url):\n parsed_url = urlparse(url)\n \n # Search for class that can handle URL and create new instance of it\n match = next((s for s in addon_sites if parsed_url.netloc in s.HandleURLs()), None)\n new_type = type(match)\n return new_type(url, parsed_url, WOW_PATH, WOW_VERSION)", "def _UrlOpenWithRetry(request):\n return urlrequest.urlopen(request)", "def redirect(url, code=302):\n exc = status_map[code]\n raise exc(location=url).exception", "def default_handler(request):\n return launch_request_handler(request)", "def __init__(self, url, proxy=None, **kwargs):\n self.proxy = proxy\n self.query_params = urllib.parse.urlencode(kwargs)\n self.url = url if not self.query_params else f\"{url}?{self.query_params}\"\n logger.info(\"UrllibHandler initialized: url=%s, proxy=%s\", self.url, self.proxy)", "def redirect_to(request, url, permanent=True, **kwargs):\n if url is not None:\n klass = permanent and HttpResponsePermanentRedirect or HttpResponseRedirect\n quoted_kwargs = {}\n for k,v in kwargs.items():\n quoted_kwargs[k] = quote(v, \"/\")\n\n # Encoded urls confuses python templating. Properly escape the templates.\n return klass(quote(RE_QUOTE.sub(r\"%%\\1\", url) % quoted_kwargs), \"/\")\n else:\n return HttpResponseGone()", "def urlopen(url):\n logging.info(\"urlopen %s\", url)\n \n try:\n return _urlopen(url)\n except ProxyError, e:\n logging.error(\"%s - %s\", str(e), url)\n response = ProxyHTTPResponse(url, None, method=\"GET\")\n response.error_bad_gateway()\n return response", "def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)", "def endpoint_for(url, method=None, return_rule=False, follow_redirects=True):\n parsed_url = urlsplit(url)\n if not parsed_url.netloc:\n # We require an absolute URL\n return None, {}\n\n # Take the current runtime environment...\n environ = dict(request.environ)\n # ...but replace the HTTP host with the URL's host...\n environ['HTTP_HOST'] = parsed_url.netloc\n # ...and the path with the URL's path (after discounting the app path, if not\n # hosted at root).\n environ['PATH_INFO'] = parsed_url.path[len(environ.get('SCRIPT_NAME', '')) :]\n # Create a new request with this environment...\n url_request = current_app.request_class(environ)\n # ...and a URL adapter with the new request.\n url_adapter = current_app.create_url_adapter(url_request)\n\n # Run three hostname tests, one of which must pass:\n\n # 1. Does the URL map have host matching enabled? If so, the URL adapter will\n # validate the hostname.\n if current_app.url_map.host_matching:\n pass\n\n # 2. If not, does the domain match? url_adapter.server_name will prefer\n # app.config['SERVER_NAME'], but if that is not specified, it will take it from the\n # environment.\n elif parsed_url.netloc == url_adapter.server_name:\n pass\n\n # 3. If subdomain matching is enabled, does the subdomain match?\n elif current_app.subdomain_matching and parsed_url.netloc.endswith(\n '.' + url_adapter.server_name\n ):\n pass\n\n # If no test passed, we don't have a matching endpoint.\n else:\n return None, {}\n\n # Now retrieve the endpoint or rule, watching for redirects or resolution failures\n try:\n return url_adapter.match(parsed_url.path, method, return_rule=return_rule)\n except RequestRedirect as r:\n # A redirect typically implies `/folder` -> `/folder/`\n # This will not be a redirect response from a view, since the view isn't being\n # called\n if follow_redirects:\n return endpoint_for(\n r.new_url,\n method=method,\n return_rule=return_rule,\n follow_redirects=follow_redirects,\n )\n except (NotFound, MethodNotAllowed):\n pass\n # If we got here, no endpoint was found.\n return None, {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
loop and copy console>serial until config.exit_char character is found. when config.menu_char is found, interpret the next key locally.
def writer(self): menu_active = False try: while self.alive: try: char = self.console.getkey() except KeyboardInterrupt: char = '\x03' if menu_active: # Menu character again/exit char -> send itself if char in self.config.menu_char: self.serial.write(char) # send character elif char in self.config.exit_char: self.stop() break # exit app elif char in 'hH?': # h, H, ? -> Show help sys.stderr.write(self.get_help_text()) elif char in self.config.photo_char: ENV.send_image_f = "Asked by console" else: sys.stderr.write('--- unknown menu character %s ---\n' % char) menu_active = False elif char in self.config.menu_char: # next char will be for menu menu_active = True elif char == '\n' or ord(char) == 10: sys.stderr.write('\n') else: self.serial.write(char) # send character except: self.alive = False raise
[ "def writer(self):\n menu_active = False\n try:\n while self.alive:\n try:\n c = console.getkey()\n except KeyboardInterrupt:\n c = '\\x03'\n if menu_active:\n if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself\n self.serial.write(c) # send character\n if self.echo:\n sys.stdout.write(c)\n elif c == '\\x15': # CTRL+U -> upload file\n sys.stderr.write('\\n--- File to upload: ')\n sys.stderr.flush()\n console.cleanup()\n filename = sys.stdin.readline().rstrip('\\r\\n')\n if filename:\n try:\n file = open(filename, 'r')\n sys.stderr.write('--- Sending file %s ---\\n' % filename)\n while True:\n line = file.readline().rstrip('\\r\\n')\n if not line:\n break\n self.serial.write(line)\n self.serial.write('\\r\\n')\n # Wait for output buffer to drain.\n self.serial.flush()\n sys.stderr.write('.') # Progress indicator.\n sys.stderr.write('\\n--- File %s sent ---\\n' % filename)\n except IOError, e:\n sys.stderr.write('--- ERROR opening file %s: %s ---\\n' % (filename, e))\n console.setup()\n elif c in '\\x08hH?': # CTRL+H, h, H, ? -> Show help\n sys.stderr.write(get_help_text())\n elif c == '\\x12': # CTRL+R -> Toggle RTS\n self.rts_state = not self.rts_state\n self.serial.setRTS(self.rts_state)\n sys.stderr.write('--- RTS %s ---\\n' % (self.rts_state and 'active' or 'inactive'))\n elif c == '\\x04': # CTRL+D -> Toggle DTR\n self.dtr_state = not self.dtr_state\n self.serial.setDTR(self.dtr_state)\n sys.stderr.write('--- DTR %s ---\\n' % (self.dtr_state and 'active' or 'inactive'))\n elif c == '\\x02': # CTRL+B -> toggle BREAK condition\n self.break_state = not self.break_state\n self.serial.setBreak(self.break_state)\n sys.stderr.write('--- BREAK %s ---\\n' % (self.break_state and 'active' or 'inactive'))\n elif c == '\\x05': # CTRL+E -> toggle local echo\n self.echo = not self.echo\n sys.stderr.write('--- local echo %s ---\\n' % (self.echo and 'active' or 'inactive'))\n elif c == '\\x09': # CTRL+I -> info\n self.dump_port_settings()\n elif c == '\\x01': # CTRL+A -> cycle escape mode\n self.repr_mode += 1\n if self.repr_mode > 3:\n self.repr_mode = 0\n sys.stderr.write('--- escape data: %s ---\\n' % (\n REPR_MODES[self.repr_mode],\n ))\n elif c == '\\x0c': # CTRL+L -> cycle linefeed mode\n self.convert_outgoing += 1\n if self.convert_outgoing > 2:\n self.convert_outgoing = 0\n self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]\n sys.stderr.write('--- line feed %s ---\\n' % (\n LF_MODES[self.convert_outgoing],\n ))\n #~ elif c in 'pP': # P -> change port XXX reader thread would exit\n elif c in 'bB': # B -> change baudrate\n sys.stderr.write('\\n--- Baudrate: ')\n sys.stderr.flush()\n console.cleanup()\n backup = self.serial.baudrate\n try:\n self.serial.baudrate = int(sys.stdin.readline().strip())\n except ValueError, e:\n sys.stderr.write('--- ERROR setting baudrate: %s ---\\n' % (e,))\n self.serial.baudrate = backup\n else:\n self.dump_port_settings()\n console.setup()\n elif c == '8': # 8 -> change to 8 bits\n self.serial.bytesize = serial.EIGHTBITS\n self.dump_port_settings()\n elif c == '7': # 7 -> change to 8 bits\n self.serial.bytesize = serial.SEVENBITS\n self.dump_port_settings()\n elif c in 'eE': # E -> change to even parity\n self.serial.parity = serial.PARITY_EVEN\n self.dump_port_settings()\n elif c in 'oO': # O -> change to odd parity\n self.serial.parity = serial.PARITY_ODD\n self.dump_port_settings()\n elif c in 'mM': # M -> change to mark parity\n self.serial.parity = serial.PARITY_MARK\n self.dump_port_settings()\n elif c in 'sS': # S -> change to space parity\n self.serial.parity = serial.PARITY_SPACE\n self.dump_port_settings()\n elif c in 'nN': # N -> change to no parity\n self.serial.parity = serial.PARITY_NONE\n self.dump_port_settings()\n elif c == '1': # 1 -> change to 1 stop bits\n self.serial.stopbits = serial.STOPBITS_ONE\n self.dump_port_settings()\n elif c == '2': # 2 -> change to 2 stop bits\n self.serial.stopbits = serial.STOPBITS_TWO\n self.dump_port_settings()\n elif c == '3': # 3 -> change to 1.5 stop bits\n self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE\n self.dump_port_settings()\n elif c in 'xX': # X -> change software flow control\n self.serial.xonxoff = (c == 'X')\n self.dump_port_settings()\n elif c in 'rR': # R -> change hardware flow control\n self.serial.rtscts = (c == 'R')\n self.dump_port_settings()\n else:\n sys.stderr.write('--- unknown menu character %s --\\n' % key_description(c))\n menu_active = False\n elif c == MENUCHARACTER: # next char will be for menu\n menu_active = True\n elif c == EXITCHARCTER: \n self.stop()\n break # exit app\n elif c == '\\n':\n self.serial.write(self.newline) # send newline character(s)\n if self.echo:\n sys.stdout.write(c) # local echo is a real newline in any case\n sys.stdout.flush()\n else:\n self.serial.write(c) # send character\n if self.echo:\n sys.stdout.write(c)\n sys.stdout.flush()\n except:\n self.alive = False\n raise", "def keypress() :\n last = 0\n tty.setcbreak(sys.stdin)\n\n try :\n while True :\n code = ord(sys.stdin.read(1)) \n if (code== 53 and last==91 or code==68) : \n key = \"left\"\n elif (code==98 or code==66) :\n key = \"down\"\n elif (code==54 or code==67) :\n key = \"right\"\n elif (code==49 or code==65 or code==69) :\n key = \"up\"\n else :\n key = None\n last=code\n if not(key is None) :\n yield key\n\n finally :\n #turn echo back on \n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n old[3] = old[3] | termios.ECHO\n termios.tcsetattr(fd, termios.TCSADRAIN, old)", "def keyboard_control(key, ser):\n fast_spd = '225'\n slow_spd = '100'\n\n if key == 'w' :\n data = f'<F,{fast_spd}>'\n elif key == 'W' :\n data = f'<F,{slow_spd}>'\n\n elif key == 's' :\n data = f'<B,{fast_spd}>'\n elif key == 'S' :\n data = f'<B,{slow_spd}>'\n\n elif key == 'd' :\n data = f'<R,{fast_spd}>'\n elif key == 'D' :\n data = f'<R,{slow_spd}>'\n\n elif key == 'a' :\n data = f'<L,{fast_spd}>'\n elif key == 'A' :\n data = f'<L,{slow_spd}>'\n \n elif key == 'x' :\n exit()\n \n else:\n data = STOP_COMMAND\n \n SendData(ser, data)", "async def key_loop(self):\n log.info(\"[key_loop] starting\")\n if sys.platform != \"win32\":\n self.old_settings = termios.tcgetattr(sys.stdin.fileno())\n tty.setraw(sys.stdin.fileno())\n async for char in stream_as_char_generator(self.loop, sys.stdin):\n if ord(char) in (3, 4):\n # Ctrl+c (sigint) & Ctrl+d (eof) get captured as a non-printing\n # characters with ASCII code 3 & 4 respectively. Quit\n # gracefully.\n self.quit(display_reconnect=True)\n elif ord(char) == 26:\n # Ctrl+z gets captured as a non-printing character with ASCII\n # code 26. Send SIGSTOP and reset the terminal.\n self.reset_stdin()\n os.kill(os.getpid(), signal.SIGSTOP)\n if sys.platform != \"win32\":\n self.old_settings = termios.tcgetattr(sys.stdin.fileno())\n tty.setraw(sys.stdin.fileno())\n\n for listener in self.listeners:\n await listener.key_press(char)\n self.reset_stdin()", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def get_key():\n\tinput_key: str = \"\"\n\ttry:\n\t\twhile not False:\n\t\t\twith Raw(sys.stdin):\n\t\t\t\tif not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag\n\t\t\t\t\tcontinue\n\t\t\t\tinput_key += sys.stdin.read(1) #* Read 1 key safely with blocking on\n\t\t\t\tif input_key == \"\\033\": #* If first character is a escape sequence keep reading\n\t\t\t\t\twith Nonblocking(sys.stdin): #* Set non blocking to prevent read stall\n\t\t\t\t\t\tinput_key += sys.stdin.read(20)\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<\"):\n\t\t\t\t\t\t\t_ = sys.stdin.read(1000)\n\t\t\t\tprint(\"INPUT: \"+input_key.replace(\"\\033\",\"<ESC>\"))\n\t\t\t\tif input_key == \"\\033\" or input_key == \"q\": #* Key is \"escape\" key if only containing \\033\n\t\t\t\t\tbreak\n\t\t\t\telif input_key.startswith((\"\\033[<0;\", \"\\033[<35;\", \"\\033[<64;\", \"\\033[<65;\")): #* Detected mouse event\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint((int(input_key.split(\";\")[1]), int(input_key.split(\";\")[2].rstrip(\"mM\"))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<35;\"):\n\t\t\t\t\t\t\tprint(\"mouse Move\") #* Detected mouse move in mouse direct mode\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<64;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll UP\") #* Detected mouse scroll up\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<65;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll DOWN\") #* Detected mouse scroll down\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<0;\") and input_key.endswith(\"m\"):\n\t\t\t\t\t\t\tprint(\"mouse Click Release\") #* Detected mouse click release\n\t\t\t\tinput_key = \"\"\n\texcept Exception as e:\n\t\tprint(f'EXCEPTION: Input thread failed with exception: {e}')", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def handle_serial_input(serial_port, down_map, up_map):\n ser = serial.Serial(serial_port)\n ctrl = 0\n while True:\n flag = ser.read()[0]\n value = ser.read()[0]\n\n combination = \"\"\n delay = 0\n if flag & 2: # handle control button changes\n ctrl = value\n if flag & 1: # handle press\n if (ctrl, value) in down_map:\n combination = down_map[(ctrl, value)]['combination']\n delay = down_map[(ctrl, value)]['delay']\n elif not flag: # handle release\n if (ctrl, value) in up_map:\n combination = up_map[(ctrl, value)]['combination']\n delay = up_map[(ctrl, value)]['delay']\n\n if combination:\n if delay:\n keyboard.press(combination)\n time.sleep(delay)\n keyboard.release(combination)\n else:\n keyboard.press_and_release(combination)", "def keyPress(self, event):\n c = event.char\n if c == \"\\r\":\n self.connectClicked()", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()", "def test_arrows():\n k_left = bytearray([27, 91, 68])\n k_right = bytearray([27, 91, 67])\n k_down = bytearray([27, 91, 66])\n k_up = bytearray([27, 91, 65])\n k_esc = bytearray([27])\n with pty_stdin(k_left) as stdin:\n with Keyboard() as keyboard:\n assert keyboard.read() == KEY_LEFT\n stdin(k_right)\n assert keyboard.read() == KEY_RIGHT\n stdin(k_down)\n assert keyboard.read() == KEY_DOWN\n stdin(k_up)\n assert keyboard.read() == KEY_UP\n stdin(k_esc)\n assert keyboard.read() == KEY_ESC", "def until_not_multi(chars) -> str:\n import sys\n chars = list(chars)\n y = \"\"\n sys.stdout.flush()\n while True:\n i = read_single_keypress()\n _ = sys.stdout.write(i)\n sys.stdout.flush()\n if i not in chars:\n break\n y += i\n return y", "def whileKeyPressed(self, code, keys=None):", "def user_select_port_loop():\n liste_ports = serial.tools.list_ports.comports()\n ports = [port[0] for port in sorted(liste_ports)]\n r = [str(i) for i in range(len(ports))]\n port_user = None\n\n while(port_user not in r):\n\n print(\"--------------\")\n for nb, port in zip(r, ports):\n print(nb, \".\", port)\n\n print(\"--------------\")\n print(\"A . Actualiser\")\n print(\"S . Simulation\")\n print(\"E . Exit\")\n print(\"--------------\")\n \n port_user = input(\"Entrer le port désiré : \")\n if (port_user == \"E\"):\n sys.exit()\n \n if (port_user == \"S\"):\n return \"\"\n \n if (port_user == \"A\"):\n liste_ports = serial.tools.list_ports.comports()\n ports = [port[0] for port in sorted(liste_ports)]\n r = [str(i) for i in range(len(ports))]\n\n return ports[int(port_user)]", "def cli():\n print('Type \"exit\" or press Ctrl+C to leave.')\n inp = _read_input()\n\n while inp != \"exit\":\n print(translate(inp))\n inp = _read_input()\n\n print(\"Bye!\")", "def menu_pressed():\n MENU_LIST = [\n '1. Display Time \\n & IP Address \\x00',\n '2. Show Humidity\\n Temp and H2O \\x00',\n '3. Activate \\n Fogpi \\x00',\n '4. System \\nTest Collector \\x00',\n '5. System \\n Reboot \\x00',\n '6. System \\n Shutdown! \\x00',\n '7. Exit \\n \\x00']\n\n item = 0\n lcd.clear()\n lcd_queue.put(MENU_LIST[item], True)\n keep_looping = True\n while (keep_looping):\n press = read_buttons()\n # UP button\n if(press == UP):\n item -= 1\n if(item < 0):\n item = len(MENU_LIST) - 1\n lcd_queue.put(MENU_LIST[item], True)\n\n # DOWN Button\n elif(press == DOWN):\n item += 1\n if(item >= len(MENU_LIST)):\n item = 0\n lcd_queue.put(MENU_LIST[item], True)\n\n # SELECT button = exit\n elif(press == SELECT):\n keep_looping = False\n\n # Take action\n if( item == 0):\n #1. Display time and IP address\n display_ipaddr()\n elif( item == 1):\n #2. Show humidity, temperature, and H2O sensors\n get_sensors()\n elif( item == 2):\n #3. Show humidity, temperature, and H2O sensors\n get_fog()\n elif( item == 3):\n #4. Start the fogpi relay_humidity.py\n output = run_cmd('sudo python /home/pi/scripts/gpioX.py')\n # lcd_queue.put('Welcome to \\x01\\x02\\x03\\x04\\x05\\n SELECT => menu ', True)\n lcd_queue.join()\n elif( item == 4):\n #5. Reboot FogPi\n\t\tlcd_queue.put('Rebooting Fopi', True)\n lcd_queue.join()\n\t\tlcd.clear()\n\t\tlcd.backlight(0x00)\n\t\tlcd.off()\n output = run_cmd('sudo reboot')\n elif( item == 5):\n #6. Shutdown FogPi\n lcd_queue.put('Shutting down', True)\n lcd_queue.join()\n lcd.clear()\n lcd.backlight(0x00)\n lcd.OFF\n output = run_cmd('sudo shutdown now')\n lcd.clear()\n exit(0)\n elif( item == 6):\n lcd_queue.put('Welcome to \\x01\\x02\\x03\\x04\\x05\\n SELECT => menu ', True)\n lcd_queue.join()\n else:\n delay_milliseconds(99)", "def enter_raw_repl(self):\n\n debug_indent(\"enter_raw_repl\")\n\n time.sleep(0.5) # allow some time for board to reset\n debug(r'self.con.write \"\\r\\x03\\x03\" (Ctrl-C twice)')\n self.con.write(b\"\\r\\x03\\x03\") # ctrl-C twice: interrupt any running program\n\n # flush input (without relying on serial.flushInput())\n n = self.con.inWaiting()\n while n > 0:\n self.con.read(n)\n n = self.con.inWaiting()\n\n if self.con.survives_soft_reset():\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n>\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n>\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 1\")\n\n debug(r'self.con.write \"\\x04\" (soft reset)')\n self.con.write(b\"\\x04\") # ctrl-D: soft reset\n data = self.read_until(1, b\"soft reboot\\r\\n\", timeout=10)\n if not data.endswith(b\"soft reboot\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 2\")\n\n # By splitting this into 2 reads, it allows boot.py to print stuff,\n # which will show up after the soft reboot and before the raw REPL.\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 3\")\n\n else:\n\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(0, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 4\")\n debug_unindent()", "def wait_for_keypress():\n if sys.platform == \"linux\" or sys.platform == \"linux2\" or sys.platform == \"darwin\":\n _ = system(\"read -s -n 1 -p \\\"Press any to continue....\\\"\")\n else:\n _ = system(\"pause\")\n print(\"\\nPress any to continue....\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getting mri (most recent influence) Returns 0 if no influence exists
def _get_mri(journal): try: return Influence.objects.filter(journal__issn=journal.issn).order_by('-date_stamp')[0] except IndexError: return 0
[ "def getFirstLumi(self):\n if hasattr(self.data, \"production\"):\n if hasattr(self.data.production, \"firstLumi\"):\n return self.data.production.firstLumi\n return 1", "def get_last_reward(self):\n return self.last_reward", "def getInitialMomentum(self) -> int:\n ...", "def _get_mimo(self):\n return self.__mimo", "def last_reward(self):\n return self.rewards[-1]", "def _get_reward(self):\n if self.is_game_done:\n return self.price - 1\n else:\n return 0.0", "def nom_marge_max(self):\n a=9\n for activite in self.activites:\n if a==activite.mar:\n return activite", "def getReward(self):\n if self.isFinished():\n win = (self.env.winner != self.opponent.colour)\n res = self.winnerReward\n if not win:\n res *= -1\n if self.alternateStarting and self.switched:\n # opponent colour has been inverted after the game!\n res *= -1\n return res\n else:\n return 0", "def _compute_reward(self): \n reward = -1\n return reward", "def getInteractionRate(self):\n m = mctal.MCTAL(self.name+'.m')\n t = m.tallies[4]\n # Returing the total\n return t.data[-1],t.errors[-1]", "def MFE_rel(self):\n try:\n return(self.MFE / self.price_open)\n except:\n return", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def get_optimal_reward(self):\n return self.optimal_reward", "def get_best_time():\n return best_reaction_time", "def get_reward_magnitude(id, data):\n pdata = data.loc[data[\"id\"] == id]\n return pdata[\"reward_mag\"].values[0]", "def _get_rsi(self):\r\n print(\"On veut acceder à l'attribut '_rsi'\")\r\n return self._rsi #C'est qu'on retourne l'att\r", "def get_reward(self) -> float:\r\n field = self.fields[self.agent_x][self.agent_y]\r\n if field == Field.EMPTY:\r\n return self.rew_empty\r\n elif field == Field.POS_TERMINAL:\r\n return self.rew_pos\r\n elif field == Field.NEG_TERMINAL:\r\n return self.rew_neg\r\n\r\n raise ValueError # Agent is standing on an illegal tile!\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the node is a "real" endpoint of an edge in the network, \ otherwise False. OSM data includes lots of nodes that exist only as \ points to help streets bend around curves. An end point is a node that \
def is_endpoint(G: nx.Graph, node: int, strict=True): neighbors = set(list(G.predecessors(node)) + list(G.successors(node))) n = len(neighbors) d = G.degree(node) if node in neighbors: # If the node appears in its list of neighbors, it self-loops. this is # always an endpoint. return True # If node has no incoming edges or no outgoing edges, it must be an # endpoint elif G.out_degree(node) == 0 or G.in_degree(node) == 0: return True elif not (n == 2 and (d == 2 or d == 4)): # Else, if it does NOT have 2 neighbors AND either 2 or 4 directed # edges, it is an endpoint. either it has 1 or 3+ neighbors, in which # case it is a dead-end or an intersection of multiple streets or has # 2 neighbors but 3 degree (indicating a change from oneway to twoway) # or more than 4 degree (indicating a parallel edge) and thus is an # endpoint return True elif not strict: # Non-strict mode osmids = [] # Add all the edge OSM IDs for incoming edges for u in G.predecessors(node): for key in G[u][node]: osmids.append(G.edges[u, node, key]['osmid']) # Add all the edge OSM IDs for outgoing edges for v in G.successors(node): for key in G[node][v]: osmids.append(G.edges[node, v, key]['osmid']) # If there is more than 1 OSM ID in the list of edge OSM IDs then it is # an endpoint, if not, it isn't return len(set(osmids)) > 1 else: # If none of the preceding rules returned true, then it is not an # endpoint return False
[ "def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1", "def has_edge(self, e):\r\n return e in self.edges", "def isNodeRegisteredWithinEndpoint(self):\n pass", "def is_external_edge(self, eid):\n \n for fid in self.mesh.regions(1,eid) :\n if self.mesh.nb_regions(2,fid) == 1 :\n return True\n return False", "def is_edge(identifier):", "def door_in_edge(self, edge: list) -> bool:\n doors = self.get_interior_doors()\n room1 = self.get_rooms()[edge[0]]\n room2 = self.get_rooms()[edge[1]]\n for i in range(len(doors)):\n if utils.door_room_relation(doors[i], room1) and utils.door_room_relation(doors[i], room2):\n return True\n return False", "def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False", "def has_edge(self, edge):\n u, v = edge\n return v in self.node_neighbors.get(u, [])", "def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]", "def _point_faces_edge(self, edge, point):\n a = sqrt((edge[0][0] - edge[1][0]) ** 2 + (edge[0][1] - edge[1][1]) ** 2)\n b = sqrt((edge[0][0] - point[0]) ** 2 + (edge[0][1] - point[1]) ** 2)\n c = sqrt((edge[1][0] - point[0]) ** 2 + (edge[1][1] - point[1]) ** 2)\n ang1, ang2 = self._angle(b, a, c), self._angle(c, a, b)\n if ang1 > pi / 2 or ang2 > pi / 2:\n return False\n return True", "def has_edge(self, source, target):\n \n raise Exception(\"TODO IMPLEMENT ME!\")", "def isEdge(self, x, y):\n return y in self._dictOut[x]", "def is_edge(self, nid1, nid2):\n return self._G.IsEdge(nid1, nid2)", "def is_end_node():\n return False", "def is_edge_server() -> bool:\n return Config().args.port is not None", "def has_edge(self, v0, v1) -> bool:\n for i in self.ed:\n if v0 in i[0] and i[0].nbr(v0) == v1:\n return True\n return False", "def is_self_referential(self, edge):\n # Determine if edge is directed or not to choose the proper splitting character\n split_str = gt.determine_split_string(edge)\n\n # split the edge\n edge_split = edge.split(split_str)\n\n return edge_split[0] == edge_split[-1] and (edge_split[0] == self.start_kind or\n edge_split[0] == self.end_kind)", "def nodeEdgesBendable(G,n):\n\tif G.degree(n)==2:\n\t\t#find out if we have a bend.\n\t\tneighbors=G.neighbors(n)\n\t\tray1=n, neighbors[0]\n\t\tray2=n, neighbors[1]\n\t\tth=fun.getAngle(ray1,ray2)\n\t\tif abs(th-np.pi)>0.01 and np.pi/5.0<th<np.pi-np.pi/5.0:\n\t\t\t#not a straight line and corner angle is not too sharp (pi/5)\n\t\t\treturn True\n\treturn False", "def has_edge(self, i, j):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively build a path of nodes until you hit an endpoint node. Please note this method is taken directly from OSMnx, and can be found in \
def build_path( G: nx.Graph, node: int, endpoints: List[int], path: List[int]) -> List[int]: # For each successor in the passed-in node for successor in G.successors(node): if successor not in path: # If successor is already in path, ignore it, otherwise add to path path.append(successor) if successor not in endpoints: # If successor not endpoint, recursively call # build_path until endpoint found path = build_path(G, successor, endpoints, path) else: # If successor is endpoint, path is completed, so return return path if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])): # If end of the path is not actually an endpoint and the path's # first node is a successor of the path's final node, then this is # actually a self loop, so add path's first node to end of path to # close it path.append(path[0]) return path
[ "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def _generate_path(next_func, start):\n path = [start]\n while True:\n next_ = next_func(path[-1])\n if next_ is not None:\n path.append(next_)\n else:\n break\n return path", "def get_all_simple_paths_from_node(graph:nx.Graph, start_node_id, depth_limit, max_num_paths_per_node:int, max_length_of_each_path_leading_to_node:int, bar:progressbar.ProgressBar):\n \n all_reachable_nodes = list(single_source_shortest_path(graph, start_node_id, depth_limit))\n all_reachable_nodes = list(filter(lambda a: a != start_node_id, all_reachable_nodes)) # Get rid of self from list\n random.shuffle(all_reachable_nodes) # This is so random nodes get selected as they are in a depth flow out pattern.\n\n sequence = [] # formatted as [0] = y/predict, [1] = pivot node id, [2] = x/train\n\n for i in range(len(all_reachable_nodes)):\n bar.update()\n\n # 1. get path\n paths_from_start_node_id_to_reachable_node_iterator = all_simple_paths(graph, start_node_id, all_reachable_nodes[i], depth_limit)\n paths_from_start_node_id_to_reachable_node = []\n \n num_simple_paths_added = 0\n for simple_path in paths_from_start_node_id_to_reachable_node_iterator:\n paths_from_start_node_id_to_reachable_node.append(simple_path)\n num_simple_paths_added += 1\n if num_simple_paths_added > max_length_of_each_path_leading_to_node:\n break\n \n for path in paths_from_start_node_id_to_reachable_node:\n # 2. get surrounding nodes\n surrounding_nodes = get_all_paths_to_node_depth_limited(graph, start_node_id, 1)\n surrounding_nodes = list(filter(lambda n_id: n_id != start_node_id, surrounding_nodes)) # remove self\n\n # 3.1 remove any paths that cross over surrounding nodes twice.\n if len(set(surrounding_nodes) & set(path)) > 1:\n continue;\n\n # 3. remove surrounding nodes contained in paths\n surrounding_nodes = list(filter(lambda a: a not in path, surrounding_nodes))\n\n sequence.append((surrounding_nodes, start_node_id, path))\n\n if len(sequence) > max_num_paths_per_node:\n break\n\n return sequence", "def get_adjacent(self, node, endnode, counter, all_paths, visited):\n upwards = node[0] + \"u\"\n downwards = node[0] + \"d\"\n horizontal = node[0] + \"h\"\n end = False\n print(\"method call starts\")\n print(\"current node\")\n print(node)\n print(\"current queue\")\n print(all_paths)\n if re.fullmatch(self.HSO_REGEX, upwards) != None:\n upwards_neighbours = self.get_neighbours(node[1], self.UPWARDS_RELATIONS)\n for neighbour in upwards_neighbours:\n if neighbour == endnode:\n end = True\n if neighbour not in visited:\n all_paths.append((upwards, neighbour))\n visited.add(neighbour)\n if re.fullmatch(self.HSO_REGEX, downwards) != None:\n downward_neighbours = self.get_neighbours(node[1], self.DOWNWARDS_RELATIONS)\n for neighbour in downward_neighbours:\n if neighbour == endnode:\n end = True\n all_paths.append((downwards, neighbour))\n if re.fullmatch(self.HSO_REGEX, horizontal) != None:\n horizontal_neighbours = self.get_neighbours(node[1], self.HORIZONTAL_RELATIONS)\n horizontal_neighbours = horizontal_neighbours.union(self.get_neighbours_lexrel(node[1]))\n for neighbour in horizontal_neighbours:\n if neighbour == endnode:\n end = True\n if neighbour != node[1]:\n all_paths.append((horizontal, neighbour))\n print(\"queue now\")\n print(all_paths)\n\n newnode = all_paths.pop()\n counter = len(newnode[0])\n if not end and counter is not 5:\n print(\"new node\")\n print(newnode)\n\n\n return self.get_adjacent(newnode, endnode, counter, all_paths)\n else:\n return end, all_paths, counter", "def find_all_paths(self, start_member, end_member, path=[]):\n network = self.__network_dict \n path = path + [start_member]\n if start_member == end_member:\n return [path]\n if start_member not in network:\n return []\n paths = []\n for member in network[start_member]:\n if member not in path:\n extended_paths = self.find_all_paths(member, \n end_member, \n path)\n for p in extended_paths: \n paths.append(p)\n return paths", "def generate_final_path(node):\n final_path = []\n while node is not None:\n final_path.append(node) # Add the current node to the list\n node = node.parent # Move to parent node\n return final_path", "def allPaths(graph, start, end, maxTotalDist, maxDistOutdoors, path = []):\n\n path = path + [start]\n\n if start == end:\n totLength, outLength = pathLength(graph, path)\n if (totLength <= maxTotalDist) and (outLength <= maxDistOutdoors):\n return [path]\n if not (graph.hasNode(start)):\n return []\n paths = []\n for node in graph.childrenOf(start):\n if node[0] not in path:\n #print \"current path \" + str(path)\n extended_paths = allPaths(graph, node[0], end, maxTotalDist, maxDistOutdoors, path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def path(self, start, end):\n nodes = self._gen.nodes\n path = recursive_pathfind(start, end, nodes)\n if path is None:\n return\n \n curve_points = [format_coordinate(nodes[node_id].location, \n self._links, \n self._gen.nodes[0].location)[:3]\n for node_id in path]\n self._paths.append(str(self.pm.curve(p=curve_points, d=5)))", "def findPaths(self, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in self.graph:\n return []\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.findPaths(node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths", "def generateNodePath(node):\n\tcurNode = node\n\tnodeList = []\n\twhile True:\n\t\tif curNode.parent is None:\n\t\t\tnodeList.append(curNode)\n\t\t\tbreak\n\t\tnodeList.append(curNode)\n\t\tcurNode = curNode.parent\n\treturn nodeList", "def find_path(self, start_member, end_member, path=[]):\n network = self.__network_dict\n path = path + [start_member]\n if start_member == end_member:\n return path\n if start_member not in network:\n return None\n for member in network[start_member]:\n if member not in path:\n extended_path = self.find_path(member, \n end_member, \n path)\n if extended_path: \n return extended_path\n return None", "def get_endpoints_in_node(self, context, node_id):", "def find(self, start, end, path=[]):\n\n path = path + [start]\n print(\"Start\", start)\n if start == end:\n return path\n for node in self.conn[start]:\n print(\"node\", node)\n if node not in path:\n print(\"path\", path)\n new_path = self.find(node, end, path)\n if new_path:\n return new_path", "def dfs_paths(graph, start, goal):\n stack = [(start, [start])]\n while stack:\n (vertex, path) = stack.pop()\n for edge in vertex.get_edges():\n if edge in graph.get_edges():\n if edge.get_vertex()[1] == goal:\n yield path + [edge.get_vertex()[1]]\n else:\n stack.append((edge.get_vertex()[1], path + [edge.get_vertex()[1]]))", "def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths", "def _find_routes(self, start_node, previous_nodes=None):\n if previous_nodes is None:\n previous_nodes = []\n\n routes = []\n for con in self.connections:\n if start_node == con.end:\n con.flip()\n if start_node == con.start:\n # if the connection ends in a box output,\n # add the connection (as a route of length 1)\n if con.end.is_box_output():\n routes.append([con])\n elif con.end.is_box_input():\n raise Exception(\"Route in connections detected, \"\n \"that ends at an input.\")\n elif con.end.is_switch_output():\n # check if there is conflict with previous nodes\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n # check orientation\n if con.end.switch.orientation == 1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n # Set orientation of the switch\n con.end.switch.orientation = -1\n # Add the node to the previous nodes and call the method\n # for the next node\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n next_step = self._find_routes(\n con.end.switch.input,\n previous_nodes=previous_nodes\n )\n # Merge the current connection with the resulting routes\n for route in next_step:\n routes.append([con] + route)\n # proceed the analogously for a switch input\n elif con.end.is_switch_input():\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n if con.end.switch.orientation == -1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n con.end.switch.orientation = 1\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n\n # continue with both outputs\n next_step0 = self._find_routes(\n con.end.switch.output[0],\n previous_nodes=previous_nodes\n )\n\n next_step1 = self._find_routes(\n con.end.switch.output[1],\n previous_nodes=previous_nodes\n )\n\n for route in next_step0:\n routes.append([con] + route)\n for route in next_step1:\n routes.append([con] + route)\n\n else:\n raise TypeError(f\"Node {con.end} not recognised\")\n\n return routes", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of all the paths to be simplified between endpoint nodes. \ The path is ordered from the first endpoint, through the interstitial \ nodes, to the second endpoint. Please note this method is taken directly from OSMnx, and can be found in \
def get_paths_to_simplify(G: nx.Graph, strict: bool=True) -> List[List[int]]: # First identify all the nodes that are endpoints endpoints = set([node for node in G.nodes() if is_endpoint(G, node, strict=strict)]) # Initialize the list to be returned; an empty list paths_to_simplify = [] # For each endpoint node, look at each of its successor nodes for node in endpoints: for successor in G.successors(node): if successor not in endpoints: # if the successor is not an endpoint, build a path from the # endpoint node to the next endpoint node try: paths_to_simplify.append( build_path(G, successor, endpoints, path=[node, successor])) except RuntimeError: # Note: Recursion errors occur if some connected component # is a self-contained ring in which all nodes are not # end points handle it by just ignoring that # component and letting its topology remain intact # (this should be a rare occurrence). log(('Recursion error: exceeded max depth, moving on to ' 'next endpoint successor'), level=lg.WARNING) return paths_to_simplify
[ "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def find_all_paths(self, start_vertex, end_vertex, path=[]):\n graph = self.__graph_dict \n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n if start_vertex not in graph:\n return []\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_paths(vertex, \n end_vertex, \n path)\n for p in extended_paths: \n paths.append(p)\n return paths", "def conversion_path(graph, source, target):\n\n import networkx as nx\n\n path = nx.shortest_path(graph, source, target)\n\n return [(a, b) for a, b in zip(path[:-1], path[1:])]", "def shortestpaths(self, start, end, edgeweight=\"t_0\"):\n graph = self.graph\n shortest_nodepaths = list(\n nx.all_shortest_paths(\n graph, start, end, weight=edgeweight, method=\"dijkstra\"\n )\n )\n shortest_paths = []\n for path in shortest_nodepaths:\n edgepath = []\n for i in range(len(path) - 1):\n edgepath.append((path[i], path[i + 1]))\n shortest_paths.append(edgepath)\n\n return shortest_paths", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale\n y_base = point[1] * scale + self.border * scale\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path", "def findPaths(self, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in self.graph:\n return []\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.findPaths(node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths", "def pathsToEdgelist(self):\n max_rows = 100000\n self.edgelist = pd.DataFrame(index = np.arange(max_rows), columns = [\"source\",\"target\"])\n rowCount = 0\n for path in self.paths.values():\n for source, target in zip(path[0:-1],path[1:]):\n self.edgelist.iloc[rowCount,] = [source, target]\n rowCount += 1\n assert rowCount <= max_rows,\"More paths than max_rows.\"\n self.edgelist.dropna(inplace = True)\n self.edgelist = self.edgelist.drop_duplicates();\n #nodes are not updated!", "def construct_path(aux_structures, node_ids): \n nodes, ways, max_speed_dic = aux_structures\n path = []\n for id_ in node_ids:\n path.append(map_node_id_to_coordinates(aux_structures, id_))\n return path", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def all_shortest_paths(self, start_node, end_node):\n s=self.min_dist(start_node,end_node)\n return self.all_paths(start_node,end_node,s,[])", "def _return_paths_from_graph(G):\n path_list = []\n for conn_component in nx.connected_component_subgraphs(G):\n source = next(iter(conn_component)) # get one random node from the connected component\n path = [source]\n\n # the algorithm works by adding to path the adjacent nodes one after the other.\n # Because the source node could be in the middle of a path, basically two paths\n # are computed and merged. Eg. for the path [0, 1, 2, 3, 4] where `source` = 2\n # the algorithm finds the path for neighbor `1` which is [0, 1], and the path for neighbor `3`\n # which is [3, 4]. The path building starts by [2], then progress through [2, 1, 0], then is inverted\n # and to add the [3, 4] path to yield [0, 1, 2, 3, 4].\n for next_node in sorted(G[source]):\n seen = source\n i = 0\n while True:\n i += 1\n if i > len(conn_component):\n raise ScaffoldException\n adj_list = [x for x in G[next_node] if x != seen]\n if len(adj_list) == 0:\n path.append(next_node)\n break\n path.append(next_node)\n seen = next_node\n next_node = adj_list[0]\n path = path[::-1]\n path_list.append(path)\n return path_list", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def symmetrify_paths(shortest_paths):\n for u in shortest_paths:\n for v in shortest_paths[u]:\n shortest_paths[u][v] = list(reversed(shortest_paths[v][u]))\n return shortest_paths", "def __construct_path(self) -> list:\n res = []\n for i in range(len(self.pred)):\n j = self.pred[i]\n res += [tuple()]\n if j == -1:\n continue # go to next iteration, -1 veut dire pas de prédécesseur\n while self.pred[j] != -1:\n res[i] += (j,)\n j = self.pred[j]\n\n # add parent node\n for i in range(len(self.pred)):\n if self.dist[i] != INF:\n res[i] += (self.node,) # on ajoute le noeud de départ\n res[i] = res[i][::-1] # reverse\n res[i] += (i,) # et le noeud d'arriver\n return res", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Archive a GIT project and upload it to Dash.
def deploy_project(name, apikey, changed_files=None, repo=None, branch='master'): zbuff = StringIO() if changed_files is not None: changed_files = list(set(changed_files) | REQUIRED_FILES) _archive_project(name, zbuff, changed_files, repo, branch) zbuff.reset() payload = {'apikey': apikey, 'project': name} req = requests.post( DASH_API_URL + 'as/import.json?version=portia', files=[('archive', ('archive', zbuff, 'application/zip'))], params=payload ) if req.status_code == 200: project_url = DASH_API_URL.rsplit('/', 2)[0] + '/p/' + name return { 'status': 'ok', 'schedule_url': project_url } else: raise DeployError('Deploy to Dash failed: %s' % req.text)
[ "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def git_archive_and_upload_tar():\n current_branch = str(subprocess.Popen('git branch | grep \"*\" | sed \"s/* //\"', \\\n shell=True,\\\n stdin=subprocess.PIPE, \\\n stdout=subprocess.PIPE).communicate()[0]).rstrip()\n env.git_branch = current_branch\n local('git archive --format=tar %(git_branch)s > %(release)s.tar' % env)\n local('touch `git describe HEAD`.tag')\n local('tar rvf %(release)s.tar `git describe HEAD`.tag; rm `git describe HEAD`.tag' % env)\n local('gzip %(release)s.tar' % env)\n run('; mkdir -p %(path)s/releases/%(release)s' % env)\n run('; mkdir -p %(path)s/packages/' % env)\n rsync_project('%(path)s/packages/' % env, '%(release)s.tar.gz' % env, extra_opts='-avz --progress')\n run('cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz' % env)\n local('rm %(release)s.tar.gz' % env)", "def __git_archive():\n project = env.get('project', 'project')\n local(\"git archive HEAD -o %s.tar; git submodule foreach 'git archive --prefix ${path}/ HEAD -o ../temp.tar; gnutar -Af ../%s.tar ../temp.tar; rm ../temp.tar'; gzip -f %s.tar\" % (project, project, project))", "def command_archive_project(syn, args):\n archived = submission.archive_project(syn, args.submissionid, args.admin)\n\n if args.output:\n with open(args.output, \"w\") as out:\n json.dump(archived, out)\n logger.info(args.output)\n else:\n logger.info(archived)", "def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)", "def archive(self):\n args = {\"id\": self.id}\n _perform_command(self.owner, \"project_archive\", args)\n self.is_archived = \"1\"", "def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()", "def github_archive(name, org, repo, ref, sha256):\n if name not in native.existing_rules():\n http_archive(\n name = name,\n strip_prefix = repo + \"-\" + ref,\n urls = [\"https://github.com/%s/%s/archive/%s.tar.gz\" % (org, repo, ref)],\n sha256 = sha256,\n )", "def _archive_repository(\n owner: str, project_name: str, secret_token: str\n) -> Tuple[bool, str]:\n project_settings = {\"archived\": \"true\"}\n\n headers = {\n \"Authorization\": f\"token {secret_token}\",\n }\n\n url = f\"https://{REST_HOST}/repos/{owner}/{project_name}\"\n\n response = patch(url, json=project_settings, headers=headers, verify=VERIFY_CERT)\n return response.ok, (\n f\"Status: {response.status_code}. \" f'Error: \"{response.text}\".'\n )", "def transfer():\n if not os.path.isfile('/tmp/{0}.tar.gz'.format(env.project)):\n utils.abort('Could not find code archive to send to server.')\n put('/tmp/{0}.tar.gz'.format(env.project), '/tmp/')\n clean_local()", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def export(repo_path, deploy_path, rev, opts={}):\n if not os.path.exists(deploy_path):\n mcv.file.mkdir(\n deploy_path,\n opts=mcv.util.merge_dicts(\n {'parents': True},\n mcv.util.select_keys(opts, ['mode', 'owner', 'group'])))\n\n cmd_tmpl = \"git archive {rev} | tar -x -C {dir}\"\n cmd = cmd_tmpl.format(rev=rev, dir=deploy_path)\n out = subprocess.Popen(\n cmd,\n cwd=repo_path,\n shell=True,\n stdout=subprocess.PIPE).communicate()[0]\n mcv.file.ch_ext(\n deploy_path,\n mcv.util.merge_dicts(\n {'recursive': True},\n mcv.util.select_keys(opts, ['owner', 'group'])))\n return out", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def _DeployArchive(self, identifiers, upload_url, labels):\n post_data = {}\n post_data[\"gcs_uri\"] = upload_url\n if labels:\n post_data[\"labels\"] = {}\n for k, v in labels.items():\n post_data[\"labels\"][k] = v\n api_response = apigee.ArchivesClient.CreateArchiveDeployment(\n identifiers, post_data)\n operation = apigee.OperationsClient.SplitName(api_response)\n return operation", "def deploy():\n build()\n collect()\n commit()\n push()", "def copy_metadata():\n\n committish = config[\"committish\"]\n output_dir = config[\"output_dir\"]\n\n bitbake_dir = \"MONTAVISTA/bitbake\"\n bitbake_dir_ref = \"%s:%s\" % (committish, bitbake_dir)\n\n try:\n\tcall([git, \"rev-parse\", bitbake_dir_ref], stdout=None, stderr=None)\n except:\n\tsys.stderr.write(\"Directory %s not found in %s\\n\" %\n\t\t\t (bitbake_dir, committish))\n\tsys.exit(1)\n\n repo_dir = os.getcwd()\n\n try:\n\tos.chdir(output_dir)\n except:\n\tsys.stderr.write(\"failed: chdir %s\\n\" % output_dir)\n\tusage()\n\n cmd = [git, 'archive', '--remote=%s' % repo_dir, bitbake_dir_ref]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\n tar = tarfile.open(fileobj=p.stdout,mode='r|tar')\n file = tar.next()\n #tar.extractall()\n while file != None:\n \ttar.extract(file)\n\tfile = tar.next()\n tar.close()\n\n git_exit_code = p.wait()\n if git_exit_code:\n\traise Exception('%s returned %d\\n' % (' '.join(cmd), git_exit_code))", "def archive(self, archive, version=None, with_optionals=False):\n if version is None:\n version = self.get_current_version()\n archive_dir = os.path.join(os.path.dirname(archive), \"sources\")\n cwd = os.getcwd()\n os.chdir(self.basedir)\n if os.path.isdir(archive_dir):\n shutil.rmtree(archive_dir)\n os.mkdir(archive_dir)\n log(\"[.]\")\n p = system(\"git archive %s\" % version, run=False)\n system(\"tar -C %s -xf -\" % archive_dir, stdin=p.stdout)\n if not self.modules:\n self.eval_modules()\n for module in self.modules:\n os.chdir(os.path.join(self.basedir, module))\n log(\"[%s]\" % module)\n p = system(\"git archive --prefix=%s/ %s\" % (module, version),\n run=False)\n system(\"tar -C %s -xf -\" % archive_dir, stdin=p.stdout)\n if not self.addons:\n self.eval_addons(with_optionals)\n for addon in self.addons:\n os.chdir(os.path.join(self.basedir, \"addons\", addon))\n log(\"[%s]\" % addon)\n p = system(\"git archive --prefix=addons/%s/ %s\" % (addon, version),\n run=False)\n system(\"tar -C %s -xf -\" % archive_dir, stdin=p.stdout)\n make_zip(archive, archive_dir)\n shutil.rmtree(archive_dir)\n os.chdir(cwd)", "def pack(commit=\"HEAD\"):\n local(\"git archive --format=tar.gz --prefix=trac-cop/ -o /tmp/trac-cop.tar.gz %(commit)s\" %\n dict(commit=commit))", "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search existing spider names in a project
def search_spider_names(project, apikey, name=''): payload = {'project': project, 'apikey': apikey, 'spider': name} req = requests.get(DASH_API_URL + 'spiders/list.json', params=payload) if req.status_code == 200: return [s.get('id') for s in req.json().get('spiders', [])] return []
[ "def site_search(name):\n return Site.find_by_name(name)", "def searchSite(search_name):\n for obj in obj_list:\n if search_name == obj.name:\n print(obj)\n break\n else:\n print(\"No climbing site with that name found... Returning to main menu.\")\n menu()", "def search(name: str = typer.Argument('', help='name of pipeline to search for')):\n client = precursor()\n for search_result in client.search_pipelines(name):\n typer.echo(f'{search_result.url:<60}{typer.style(search_result.name, bold=True)}')", "def SearchDemo(name, keyword):", "def spider_list(request, client_id):\n if request.method == 'GET':\n keyword = request.GET.get('keyword')\n spiders = SpiderSerializer(Spider.objects.filter(project__client=client_id, name__contains=keyword), many=True)\n return HttpResponse(JSONRenderer().render(spiders.data))", "def test_spider_name(spider):\n assert spider.name == \"all\"", "def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls", "def search():\n viewmode = int(plugin.get_setting('viewmode'))\n if viewmode is None: viewmode = 500\n plugin.set_view_mode(viewmode)\n searchtxt = ''\n searchtxt = plugin.get_setting('lastsearch')\n searchtxt = plugin.keyboard(searchtxt, 'Search All Sites', False)\n searchquery = searchtxt.replace(' ', '+')\n plugin.set_setting(key='lastsearch', val=searchtxt)\n litems = []\n allitems = []\n itemslist = []\n DOSTR8 = plugin.get_setting(key='dostr8')\n __imgnext__ = __imgsearch__.replace('search.png', 'next.png')\n siteurls = getAPIURLS()\n siteurls.pop('motherless')\n siteurls.pop('motherless_search')\n siteurls.pop('porkytube')\n for k, v in siteurls.iteritems():\n siteurl = v\n sitelabel = k\n if sitelabel.find(\"_search\") != -1: sitelabel = sitelabel.replace('_search', '')\n surl = siteurl\n if siteurl.find('{0}') == -1:\n if siteurl.find('search=gay&') != -1:\n surl = siteurl.replace('search=gay&', 'search={0}+gay&')\n else:\n surl = siteurl.replace('search=', 'search={0}')\n searchurl = surl.format(searchquery)\n itemslist = parseVideosUrl(searchurl)\n allitems.extend(makeVideoItems(itemslist, sitename=sitelabel))\n\n murl = 'http://motherless.com/feeds/search/gay+{0}/videos?format=json&sort=date&offset=0&limit=250'\n if searchtxt.find(\"-gay\") != -1:\n murl = 'http://motherless.com/feeds/search/{0}/videos?format=json&sort=date&offset=0&limit=250'\n searchtxt = searchtxt.replace('-gay','')\n searchquery = urllib.quote_plus(searchtxt)\n murl = murl.format(searchquery)\n try:\n itemslist = parseVideosUrl(murl)\n allitems.extend(makeVideoItems(itemslist, sitename='motherless'))\n except:\n xbmc.log(\"***ERROR GETTING MOTHERLESS SEARCH RESULTS***\\n{0}\".format(murl))\n litems = allitems\n litems.sort(key=lambda litems: litems.label) #= sorted(allitems, key=lambda allitems: allitems.label)\n #plugin.set_content('movies')\n #return plugin.finish(items=litems, sort_methods=[SortMethod.LABEL_IGNORE_THE, SortMethod.GENRE, SortMethod.DURATION, SortMethod.VIDEO_YEAR, SortMethod.VIDEO_RATING])\n return finish(litems)", "def addCrawler(name):\n global allCrawlerNames\n if name == 'scihub':\n allCrawlers.append(ScihubCrawler())\n allCrawlerNames = [ c.name for c in allCrawlers ]", "def search(self, plugin_name: Query(\"q\"),):", "def crawl_projects(self, projects):\n for proj in projects:\n self.crawl(proj)", "def projects(lancet, query):\n projects = lancet.timer.projects()\n\n if query:\n regexp = re.compile(query, flags=re.IGNORECASE)\n\n def match(project):\n match = regexp.search(project[\"name\"])\n if match is None:\n return False\n project[\"match\"] = match\n return True\n\n projects = (p for p in projects if match(p))\n\n for project in sorted(projects, key=lambda p: p[\"name\"].lower()):\n name = project[\"name\"]\n\n if \"match\" in project:\n m = project[\"match\"]\n s, e = m.start(), m.end()\n match = click.style(name[s:e], fg=\"green\")\n name = name[:s] + match + name[e:]\n\n click.echo(\n \"{:>9d} {} {}\".format(\n project[\"id\"], click.style(\"‣\", fg=\"yellow\"), name\n )\n )", "def search_azure_repos(self, almSetting, projectName=None, searchQuery=None):", "def search_specific_packages(search_term):\n return search_packages(search_term)", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def parse_search_results(self, response):\n repo_link_xpath = ('//ul[@class=\"repo-list js-repo-list\"]'\n '/li/h3/a')\n repo_urls = set(response.xpath(('{0}/@href'.format(repo_link_xpath)))\\\n .extract())\n\n for project in PROJECTS.itervalues():\n if project.short_url not in repo_urls:\n self.logger.error(('NOT FIND {0} in repos. URLs! Skipping '\n 'project `{1}`...').format(project.short_url,\n project.name))\n continue\n # else:\n crawled_infos = CrawledInfos(project_name=project.name)\n\n link_xpath = '{0}[@href=\"{1}\"]'.format(repo_link_xpath,\n project.short_url)\n next_url = list(extract_links(response, xpaths=link_xpath))[0]\n\n request = Request(next_url,\n callback=self.parse_project,\n meta={\n 'crawled_infos': crawled_infos,\n 'project': project,\n })\n self.requests.append(request)\n yield request", "def search(self, file_name, path):\n for item in path.iterdir(): \n if item.is_file() and item.name== file_name:\n self.lst.append(item.absolute())\n elif item.is_dir() and self.recursion: # If the -r command was used before the -s command\n self.search(file_name, item)", "def list_spiders(self, project):\n response = self.get('listspiders', project=project)\n\n try:\n self._assert_status_is_ok(response)\n except exceptions.ResponseNotOKException as e:\n if 'no active project' in str(e):\n raise exceptions.ProjectDoesNotExist('Project %s does not exist' % project)\n raise\n\n for spider in response.get('spiders', []):\n yield spider", "def get_searches(self):\n if self.csv_file:\n log.msg(\"Spider: Fetching searches from \" + self.csv_file, level=log.DEBUG) \n return self.search_factory.get_csv_searches() \n else:\n #Use some other source for target URLs - database?\n raise SainsburySpiderError(\"Cannot find input file \" + self.csv_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download a zipped project from Dash.
def _download_project(name, apikey): payload = {'apikey': apikey, 'project': name, 'version': 'portia'} r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload) return r.content
[ "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def download_data():\n url = 'https://www.dropbox.com/s/p9wmkvbqt1xr6lc/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_project_archive(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n filename = project.create_downloadable_archive()\n file_handle = open(filename, \"rb\")\n response = FileResponse(file_handle)\n\n response[\"Content-Length\"] = os.path.getsize(filename)\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{}.zip\"'.format(project.name)\n\n return response\n else:\n raise PermissionDenied", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def download():\n url = \"https://www.transparenciafiscal.gob.sv/downloads/zip/0700-DGII-DA-2020-IMP02.zip\"\n zipname = \"taxes2020.zip\"\n content = requests.get(url).content\n with open(zipname, \"w\") as fd:\n fd.write(content)\n with ZipFile(zipname, 'r') as fz:\n fz.extractall()", "def download_zip(self, path: Path) -> Path:\n if not self.url:\n raise ValueError(\"Release must have a valid url to download the zip.\")\n\n with requests.get(self.url, stream=True) as response:\n with open(path, \"wb\") as download_file:\n shutil.copyfileobj(response.raw, download_file)\n\n return path", "def download_dataset(root: str):\n URL = \"https://github.com/deepmind/dsprites-dataset/archive/master.zip\"\n download_and_extract_archive(url=URL, download_root=root)", "def download_export():\n html = urllib2.urlopen(WCA_EXPORT_URL + '/export.html')\n soup = BeautifulSoup(html, 'html.parser')\n latest = soup.find('dl').find_all('a', href=re.compile('tsv'))[0].get('href')\n zippath = SCRIPT_DIR + WCA_EXPORT_DIR + '/' + latest\n if not os.path.isfile(zippath):\n with open(zippath, 'wb') as file:\n file.write(urllib2.urlopen(WCA_EXPORT_URL + '/' + latest).read())", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def download_data_archive(path: str = None, url: str = 'http://116.203.189.3/data.zip', DOI: str = None, if_exists: str = 'error'):\r\n # use default path if none was provided\r\n if path is None:\r\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', 'data'))\r\n \r\n # check if the data folder already exists\r\n if os.path.exists(path) and len(os.listdir(path)) > 0:\r\n if if_exists == 'error':\r\n raise OSError(f\"The data path {path} already exists and is not empty. Pass if_exists='prune' to remove it.\")\r\n elif if_exists == 'prune':\r\n shutil.rmtree(path)\r\n os.mkdir(path)\r\n else:\r\n raise AttributeError(f'if_exists must be one of \"error\", \"prune\"')\r\n \r\n # check which download route is used:\r\n if DOI is None:\r\n # now the data folder exists - download the archive\r\n print(f'Found Server URL: {url}\\nStart downloading...', end='', flush=True)\r\n \r\n req = requests.get(url, stream=True)\r\n zip = zipfile.ZipFile(io.BytesIO(req.content))\r\n\r\n print(f'done.\\nExtracting to {path}...', end='', flush=True)\r\n zip.extractall(os.path.abspath(os.path.join(path, '..')))\r\n print('done.', flush=True)\r\n else:\r\n # now the data folder exists - download the archive\r\n print(f'Found DOI: {DOI}\\nStart downloading...', end='', flush=True)\r\n\r\n # Build the URL from Zenodo DOI\r\n chunk = DOI.split('/')[-1]\r\n record = chunk.split('.')[1]\r\n\r\n # request the existing data from Zenodo API\r\n dat = requests.get(f'https://zenodo.org/api/records/{record}').json()\r\n for f in dat['files']:\r\n if f['type'] == 'zip':\r\n req = requests.get(f['links']['self'], stream=True)\r\n zip = zipfile.ZipFile(io.BytesIO(req.content))\r\n\r\n # extract the data to the data folder\r\n print(f'done.\\nExtracting to {path}...', end='', flush=True)\r\n zip.extractall(os.path.abspath(os.path.join(path, '..')))\r\n print('done.', flush=True)\r\n break", "def download_dependency_url(name, url, temp_path, build_path, config, zip=True):\n parsed = urlparse(url)\n fn = os.path.basename(parsed.path)\n target_name = os.path.join(temp_path, fn)\n logger.info(f\"Downloading {url} to {target_name}\")\n\n download_file(url, target_name)\n\n if zip:\n with zipfile.ZipFile(target_name, \"r\") as z:\n z.extractall(build_path)\n else:\n shutil.copy(target_name, os.path.join(build_path, \"GameData\"))", "def download(remove_archive):\n download_url = requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download', params={\n 'public_key': 'https://yadi.sk/d/TX5k2hkEm9wqZ',\n 'path': '/classification/rtsd-r3.tar.lzma'\n }).json()['href']\n\n archive_path = Path('./rtsd-r3.tar.lzma')\n with open(archive_path, 'wb') as file:\n archive_ = requests.get(download_url).content\n file.write(archive_)\n\n extract_to = Path('./datasets')\n extract_to.mkdir(parents=True, exist_ok=True)\n shutil.unpack_archive(archive_path, extract_to, format='xztar')\n os.remove(extract_to / 'rtsd-r3/.crop.swp')\n if (remove_archive):\n os.remove(archive_path)", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download_binary(self):\n self.archive = download_file(self.download_dir, self.client_version)", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def DownloadResource(url, path):\n import StringIO\n import zipfile\n\n import requests\n\n print(\"Downloading... {} to {}\".format(url, path))\n r = requests.get(url, stream=True)\n z = zipfile.ZipFile(StringIO.StringIO(r.content))\n z.extractall(path)\n print(\"Completed download and extraction.\")", "def _download( self ):\n self._system.execute_command( \"git\", [\"clone\", \"git@github.com:snoplus/snogoggles.git\", \n self.get_install_path()], cwd=os.getcwd(), verbose=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert to front facing coordinates
def get_front_facing_xz(self): yaw_radian = math.radians(self.cur_rotation) return cam.step * math.sin(yaw_radian) * math.cos(0), cam.step * math.cos( yaw_radian) * math.cos(0)
[ "def make_front_pos(self):\n self.front_pos = (self.base_pos[0], (self.base_pos[1] - self.imageheight))\n\n self.front_pos = self.rotationxy(self.base_pos, self.front_pos, self.radians_angle)", "def get_front_facing_xz():\n yaw_radian = math.radians(rot_deg)\n return move_speed * math.sin(yaw_radian) * math.cos(0), move_speed * math.cos(\n yaw_radian) * math.cos(0)", "def hex_in_front(self, coords, facing):\n x, y = coords\n neighbours = HexGrid.NEIGHBOURS_ODD if x % 2 else HexGrid.NEIGHBOURS_EVEN\n dx, dy = neighbours[facing]\n ret = x + dx, y + dy,\n return ret", "def frontFace(self):\n\n if not self.threedee:\n return gl.GL_CCW\n\n # Only looking at the mesh -> display\n # transform, thus we are assuming that\n # the MVP matrix does not have any\n # negative scales.\n xform = self.opts.getTransform('mesh', 'display')\n\n if npla.det(xform) > 0: return gl.GL_CCW\n else: return gl.GL_CW", "def front(self):\n return _almathswig.vectorPose2D_front(self)", "def front(self):\n return _almathswig.vectorPosition2D_front(self)", "def front(self):\n return _almathswig.vectorPosition6D_front(self)", "def getFacesFront(self):\n z = self.getFaceCenters()[2]\n from fipy.variables.faceVariable import FaceVariable\n return FaceVariable(mesh=self, value=z == _madmin(z))", "def normalised_front(self, front):\n\n front_x = [i[0] for i in front]\n front_y = [i[1] for i in front]\n\n max_x = max(front_x)\n max_y = max(front_y)\n\n min_x = min(front_x)\n min_y = min(front_y)\n\n x_norm = [(i-min_x)/(max_x-min_x) for i in front_x]\n y_norm = [(i-min_y)/(max_y-min_y) for i in front_y]\n\n front_norm = zip(x_norm,y_norm)\n return front_norm", "def _find_front(self):\n self.front = (laplace(self.working_mask) > 0).astype('uint8')\n # TODO: check if scipy's laplace filter is faster than scikit's", "def pareto_frontier(self) -> Tuple[Tensor, Tensor]:\n raise NotImplementedError(\"Pareto frontier not yet implemented.\")", "def _update_front_car(self):\n try:\n t = self._tf_buffer.lookup_transform(\n target_frame=frames[\"map1_frame_id\"],\n source_frame=frames[\"car1_frame_id\"],\n time=rospy.Time())\n except Exception as err:\n rospy.loginfo(err)\n else:\n global FRONT_POSE, FRONT_ORI, FRONT_TH\n FRONT_POSE[0] = t.transform.translation.x\n FRONT_POSE[1] = t.transform.translation.y\n FRONT_ORI[0] = t.transform.rotation.x\n FRONT_ORI[1] = t.transform.rotation.y\n FRONT_ORI[2] = t.transform.rotation.z\n FRONT_ORI[3] = t.transform.rotation.w\n _, _, FRONT_TH = tf.transformations.euler_from_quaternion(FRONT_ORI)", "def pareto_frontier(self) -> Tuple[Tensor, Tensor]:\n raise NotImplementedError(\n \"Pareto frontier not yet implemented.\"\n ) # pragma: no cover", "def front(self):\n return _osgAnimation.vectorFloatKeyframe_front(self)", "def to_zero_origin(self):\n self.position=[n-1 if n>0 else 0 for n in self.position]\n self.uv=[n-1 if n>0 else 0 for n in self.uv]\n self.normal=[n-1 if n>0 else 0 for n in self.normal]", "def get_origin(self):\n return self.x0, self.y0, self.z0", "def translated_to_origin(self) -> \"Shape\":\n # return Shape(self.points - self.axis_means())\n return self.translated_to_point(-self.axis_means())", "def get_origin(self) :\n return self.x0, self.y0, self.z0", "def front(self):\n return _osgAnimation.vectorMatrixKeyframe_front(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called by base init, after class change or format text change
def initFormat(self): pass
[ "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def set_text(self):\n pass", "def post_init(self):\n pass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def _post_init(self):\n pass", "def __init__(self, text, replacements):\n self.text = text\n self.replacements = replacements", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def __init__(self, *args, **kwargs):\n _richtext.RichTextLine_swiginit(self,_richtext.new_RichTextLine(*args, **kwargs))", "def _init_display(self):\n raise NotImplementedError", "def __init__(self, text, position=0, current_token=None):\n pass", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, *args, **kwargs):\n _richtext.RichTextCtrl_swiginit(self,_richtext.new_RichTextCtrl(*args, **kwargs))\n self._setOORInfo(self)", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_gstudio_signals()", "def after_render(self):\n #first of all the text is fixed\n self.fix_text_characters()\n #check math inside titles\n self.math_inside_title = self.is_math_inside_title()", "def after_parsing(self):", "def __init__(self, *args, **kwargs):\n _richtext.RichTextPrinting_swiginit(self,_richtext.new_RichTextPrinting(*args, **kwargs))", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def __init__(self, *args, **kwargs):\n _richtext.RichTextEvent_swiginit(self,_richtext.new_RichTextEvent(*args, **kwargs))", "def __init__(self):\n super(DefaultEventFormatter, self).__init__(\n data_type=self.DATA_TYPE, format_string=self.FORMAT_STRING,\n format_string_short=self.FORMAT_STRING_SHORT)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change this field's type to newType with default format
def changeType(self, newType): self.__class__ = globals()[newType + 'Format'] self.format = self.defaultFormat self.initFormat()
[ "def changeFormatFieldType(self, formatName, fieldName, newFieldType):\n nodeFormat = (globalref.mainControl.activeControl.model.\n formats[formatName])\n field = nodeFormat.fieldDict[fieldName]\n field.changeType(newFieldType)", "def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise Exception('The data type of this column is not yet defined!')", "def registerNewFieldType(self, fieldTypeClass):\n fieldformat.fieldTypes.append(fieldTypeClass.typeName)\n setattr(fieldformat, fieldTypeClass.__name__, fieldTypeClass)", "def change_dtype(input_filename,output_filename,input_fieldname,\n output_fieldname,new_dtype,grid_type,**grid_kwargs):\n\n field = iodriver.load_field(input_filename,\n file_type=\\\n iodriver.get_file_extension(input_filename),\n field_type='Generic',\n fieldname=input_fieldname,\n unmask=False,\n timeslice=None,\n grid_type=grid_type,\n **grid_kwargs)\n field.change_dtype(new_dtype)\n iodriver.write_field(output_filename,field,\n file_type=iodriver.get_file_extension(output_filename),\n fieldname=output_fieldname)", "def _type_convert(new_type, obj):\n return new_type(obj)", "def __modify_schema__(cls, field_schema):\n field_schema.update(type='string')", "def _convert_field_type(row):\n return row", "def edit_data_type(self, field_name: str, data_type: DataType):\n if field_name not in fields:\n logger.error('Failed to change data_type of field: %s', field_name) \n raise KeyError(f'{field_name} does not exist in fields', field_name)\n fields[field_name] = data_type\n logger.info(f'{field_name} set to {data_type}', field_name, data_type)", "def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field", "def transform_field(field):\r\n field.name = field.attname\r\n if isinstance(field, models.AutoField):\r\n # The historical model gets its own AutoField, so any\r\n # existing one must be replaced with an IntegerField.\r\n field.__class__ = models.IntegerField\r\n elif isinstance(field, models.FileField):\r\n # Don't copy file, just path.\r\n field.__class__ = models.TextField\r\n\r\n # Historical instance shouldn't change create/update timestamps\r\n field.auto_now = False\r\n field.auto_now_add = False\r\n\r\n if field.primary_key or field.unique:\r\n # Unique fields can no longer be guaranteed unique,\r\n # but they should still be indexed for faster lookups.\r\n field.primary_key = False\r\n field._unique = False\r\n field.db_index = True\r\n field.serialize = True", "def field_type(self):\n return \"\"", "def restore_type(field_type, value):\n field_types = {\n 'BooleanField': string_to_bool,\n 'CharField': str,\n 'FloatField': float,\n 'IntegerField': int,\n }\n return_val = lambda x: x\n recast = field_types.get(field_type, return_val)\n return recast(value)", "def change_object_type(obj, new_type):\n # type: (Union[str, SupportsInt, SupportsFloat], str) -> Union[str, int, float]\n if new_type == 'str':\n return str(obj)\n elif new_type == 'int':\n return int(obj)\n elif new_type == 'float':\n return float(obj)\n else:\n raise IOError('expected_type \"{}\" is not supported in this function.'.format(new_type))", "def _convert_primitive_type(self, type, **kwargs):\n if in_kwargs_and_true(kwargs, \"input\"):\n return type\n if in_kwargs_and_true(kwargs, \"input_field\"):\n return get_field_as(type, _as=graphene.InputField)\n return get_field_as(type, _as=graphene.Field)", "def _assign_type(self, type):\n if self.is_input:\n return 'data'\n else:\n return type", "def from_typed_field(type, value):\n\treturn \"[\"+type+\"]\"+value", "def update_column_format(self):\n pass", "def put_type_into(preinstanced, data, defaults):\n data['type'] = preinstanced.value\n \n return data", "def _change_column_type(self, t_trans, value):\n # create an element object\n val = ForElement(value)\n\n # if not set the type and the define flagg\n self._format = val.get_fvalue()\n\n # set the type to the one in the transformator object\n self._type = t_trans.higher_type\n\n # go over all data\n for index in range(len(self._data)):\n if self._data[index] != None:\n # transform all non-Null entries\n self._data[index] = t_trans.to_higher_type(self._data[index])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns English name if assigned, o/w name
def englishName(self): if self.enName: return self.enName return self.name
[ "def english_name(self) -> str | None:\n return self.get_display_name(Locale('en'))", "def english_name(self):\n return self.language.english_name", "def english_name(self):\n return self._english_name", "def name_en(self):\n return self._name_en", "def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')", "def getTranslatedName(self, orEmpty=False):\n if self.translated_name is not None:\n return self.translated_name\n elif orEmpty is True:\n return ''\n else:\n return self.name", "def primary_name(names):\n\tlangs = names.keys()\n\tif 'en' in langs:\n\t\treturn names['en']\n\treturn names[langs[0]]", "def localized_name(self) -> str:\n return _(self.name)", "def get_abbreviation_of(self, name):\n for language in self.user_data['languages']:\n if language['language_string'] == name:\n return language['language']\n return None", "def get_name(self):\r\n # TODO: Implementasi method untuk mengembalikan name\r\n pass", "def name_en(self, name_en):\n self._name_en = name_en", "def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval", "def getStandardName(name: unicode) -> unicode:\n ...", "def get_name(self) -> str:", "def get_name_translation(self):\n\t\treturn frappe.get_value(\n\t\t\t\"Translation\",\n\t\t\t{\"source_text\": self.doc_type, \"language\": frappe.local.lang or \"en\"},\n\t\t\t[\"name\", \"translated_text\"],\n\t\t\tas_dict=True,\n\t\t)", "def get_display_name(self) -> str:", "def language_name(self) -> str:\n return self.random_element(self.language_names)", "def get_level_name(self, level_id):\n for (english_name, level_package) in self.levels[self.game]:\n if level_package.lower() == level_id.lower():\n return english_name\n return None", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return name used for labels add for required fields
def labelName(self): if self.isRequired: return '%s*' % self.name return self.name
[ "def label(field):\n if hasattr(field,'long_name'):\n return field.long_name\n elif hasattr(field,'units'):\n return \"%s (%s)\"%(field.nxname,field.units)\n else:\n return field.nxname", "def name_field_label(self):\n return self._name_field_label", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def s3_required_label(field_label):\n\n return TAG[\"\"](\"%s:\" % field_label, SPAN(\" *\", _class=\"req\"))", "def get_label(self) -> str:\n pass", "def Label(self) -> str:", "def _anon_name_label(self) -> str:\n name = getattr(self, \"name\", None)\n return self._anon_label(name)", "def getLabel(self):\n\n return self.name", "def field_label(self):\n return self._field_label", "def generate_label(self) -> str:\n return str(self.name) + str(self.index)", "def label(self):\n if self._label is not None:\n return str(self._label)\n else:\n return self.__class__.__name__", "def get_field_name(self):\r\n return self.name", "def field_label(field_name, bushfire=None):\r\n field_name = FIELD_MAPPING.get(field_name) or field_name\r\n if bushfire:\r\n try:\r\n return bushfire._meta.get_field(field_name).verbose_name\r\n except:\r\n return field_name\r\n else:\r\n return field_name", "def get_field_label(value, arg):\n try:\n model = apps.get_model('cts_forms', value)\n field = model._meta.get_field(arg)\n except FieldDoesNotExist:\n return arg.replace('_', ' ')\n\n return variable_rename.get(field.name, field.verbose_name)", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def make_label(descriptor: dict) -> str:\n pass", "def get_name(self) -> str:", "def get_label_field(self):\n\n return self.label_field", "def FieldName(self) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): prefix = self.prefix suffix = self.suffix if titleMode: if self.html: storedText = self.removeMarkup(storedText) if globalref.docRef.formHtml: prefix = self.removeMarkup(prefix) suffix = self.removeMarkup(suffix) else: if not self.html: storedText = escape(storedText).replace('\n', '<br />') if not globalref.docRef.formHtml: prefix = escape(prefix) suffix = escape(suffix) return u'%s%s%s' % (prefix, storedText, suffix)
[ "def __repr_title(self):\n return (\n self.title if not self.done\n else '̶'.join(c for c in self.title)\n )", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def processed_text(self):\n if self.caption is None:\n return \"\"\n else:\n text = re.sub('[\\n\\r]', ' ', self.caption)\n return text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def title(text, size=3):\n return '<h%s>%s</h%s>' % (size, text, size)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def emphasize(self, text):\n text1 = '*%s*' % text\n return text1", "def textFormats():\n log = QtGui.QTextCharFormat()\n log.setFontFamily(\"monospace\")\n \n url = QtGui.QTextCharFormat(log)\n url.setForeground(QtGui.QBrush(QtGui.QColor(\"blue\")))\n url.setFontUnderline(True)\n url.setAnchor(True)\n \n msg = QtGui.QTextCharFormat()\n msg.setFontFamily(\"sans-serif\")\n msg.setFontWeight(QtGui.QFont.Bold)\n \n msgok = QtGui.QTextCharFormat(msg)\n msgok.setForeground(QtGui.QBrush(QtGui.QColor(\"green\")))\n \n msgerr = QtGui.QTextCharFormat(msg)\n msgerr.setForeground(QtGui.QBrush(QtGui.QColor(\"red\")))\n \n return locals()", "def fmt_title(title, maxw):\n if len(title) <= maxw:\n return title\n else:\n return \"<%s\" % (title[-(maxw - 1):])", "def print_title(text):\n\tcolor_print(text, color='yellow', underline=True, bold=True)", "def _escape(self, text: str):\n\n\t\tif self.output_format == \"html\":\n\t\t\treturn html.escape(text)\n\t\telse:\n\t\t\treturn text", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def text_title(filename):\n lines = pdf_text(filename).strip().split('\\n')\n\n i = title_start(lines)\n j = title_end(lines, i)\n\n return ' '.join(line.strip() for line in lines[i:j])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tuple of this field's text in edit format and bool validity, using edit format option
def editText(self, item): storedText = item.data.get(self.name, '') result = self.formatEditText(storedText) if self.isRequired and not result[0]: return (result[0], False) return result
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def _text_checkbox(self) -> Tuple[str, str, int, int]:\n w = ('bold' if self.weight_var.get() else 'normal')\n s = ('italic' if self.slant_var.get() else 'roman')\n u = (1 if self.underline_var.get() else 0)\n o = (1 if self.overstrike_var.get() else 0)\n ww = (w, s, u, o,)\n return ww", "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. (Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def __get_field_format_options(self, data_type):\n try:\n return FieldDispatcher().get_format_options(data_type)\n except NotImplementedError:\n return False, False", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def updateText(self):\n self.errorFlag = False\n try:\n self.setText(self.field.editorText(self.node))\n except ValueError as err:\n if len(err.args) >= 2:\n self.setText(err.args[1])\n else:\n self.setText(self.node.data.get(self.field.name, ''))\n self.errorFlag = True\n if self.field.showRichTextInCell:\n self.doc.setHtml(self.text())\n else:\n self.doc.setPlainText(self.text())", "def ge(self):\r\n return(''.join(self.editedText))", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def format(self, text):\n return text.format(**self)", "def format_args(arg_type, text):\n if arg_type == 'list':\n args_text = text.strip().split()\n elif arg_type == 'flag' and not text:\n args_text = True\n else:\n args_text = text.strip()\n return args_text", "def getFormatFieldExtraText(self, formatName, fieldName):\n nodeFormat = (globalref.mainControl.activeControl.model.\n formats[formatName])\n field = nodeFormat.fieldDict[fieldName]\n return (field.prefix, field.suffix)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def validation_error_text(self):\n return self._validation_paragraph('error').text[0]", "def _task_field(attrs, field, required=False):\n \n # This uses the private meta attribute of the model, but there seems to be no other way\n try:\n return forms.CharField(\n widget=widgets.Textarea(attrs=attrs), required=required,\n help_text=Task._meta.get_field_by_name(field)[0].help_text\n )\n except:\n return forms.CharField(widget=widgets.Textarea(attrs=attrs), required=required)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return initial value in edit format, found in edit format option
def getEditInitDefault(self): return self.formatEditText(self.initDefault)[0]
[ "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def value(self):\n if not self.form.is_bound:\n data = self.form.initial.get(self.name, self.field.initial)\n if callable(data):\n data = data()\n # If this is an auto-generated default date, nix the\n # microseconds for standardized handling. See #22502.\n if (isinstance(data, (datetime.datetime, datetime.time)) and\n not getattr(self.field.widget, 'supports_microseconds', True)):\n data = data.replace(microsecond=0)\n else:\n data = self.field.bound_data(\n self.data, self.form.initial.get(self.name, self.field.initial)\n )\n return self.field.prepare_value(data)", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def value(value_or_editable_value):\n if isinstance(value_or_editable_value, EditableValue):\n return value_or_editable_value.initial_value\n else:\n return value_or_editable_value", "def get_initial_value (self):\n\n return None", "def _get_format_code(self):\n return self._format_code", "def GetDefaultFormat(self, p_int, p_int_1, bool):\n ...", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def default_format(self):\n return next(itervalues(self.formats))", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def get_format ( self, object, trait, row, column ):\n if column in [0, 1, 2]:\n a = self.format\n b = '%3d'\n self.format = b\n c = self._result_for( 'get_format', object, trait, row, column )\n self.format = a\n return c\n else:\n return self._result_for( 'get_format', object, trait, row, column )", "def get_value(self):\n # Get the value of the text in the line edit.\n print \"The value of the line edit is %s\" % self.some_le.text()", "def GetDefaultInternalFormat(self, p_int, p_int_1, bool):\n ...", "def get_default_format(cls) -> FormatStr:\n return FORMAT.inverse[cls.defaultFormat()]", "def template_format(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_format\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of choices for setting the init default
def initDefaultChoices(self): return []
[ "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def get_choices(self):\r\n return []", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def options(self, values):\r\n return self.get_choices()", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get(self) -> List:\n return lizardspock.choices", "def _getImportChoices(self):\n return ['auto', 'xmipp','relion', 'eman', 'dogpicker']", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def get_choices(self):\n return self.choices.all()", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def _get_field_choices(bottle):\n default_choice = ('', '-- select a field --')\n field_choices = bottle.get_field_choices()\n\n choices = [default_choice]\n choices.extend(field_choices)\n\n return choices", "def default_value_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]]:\n return pulumi.get(self, \"default_value_list\")", "def set_choices(self, **kwargs):\n raise NotImplementedError()", "def choices(self) -> List[Choice]:\n return self._choices", "def choices(self):\n if self._choices:\n return self._choices\n\n self._choices = Request(\n base=\"{}/{}/_choices/\".format(self.api.base_url, self.name),\n token=self.api.token,\n private_key=self.api.private_key,\n http_session=self.api.http_session,\n ).get()\n\n return self._choices", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def get_choices(self, with_empty=False):\n choices = []\n for f in OPTION_REGEX.findall(self.options):\n choices.append((f[0].strip(), f[1].strip()))\n return choices", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired)
[ "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def _text_checkbox(self) -> Tuple[str, str, int, int]:\n w = ('bold' if self.weight_var.get() else 'normal')\n s = ('italic' if self.slant_var.get() else 'roman')\n u = (1 if self.underline_var.get() else 0)\n o = (1 if self.overstrike_var.get() else 0)\n ww = (w, s, u, o,)\n return ww", "def ge(self):\r\n return(''.join(self.editedText))", "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def determineReprocessText():\n result = False\n \n #read in the file modification list and compare modified times\n try:\n with open(TEXT_CHANGED_FILENAME, \"r\") as fin:\n for line in fin:\n line = line.strip()\n data = line.split(\",\")\n if int(data[0]) != int(os.path.getmtime(data[1])):\n result = True\n break\n except IOError:\n result = True\n \n return result", "def updateText(self):\n self.errorFlag = False\n try:\n self.setText(self.field.editorText(self.node))\n except ValueError as err:\n if len(err.args) >= 2:\n self.setText(err.args[1])\n else:\n self.setText(self.node.data.get(self.field.name, ''))\n self.errorFlag = True\n if self.field.showRichTextInCell:\n self.doc.setHtml(self.text())\n else:\n self.doc.setPlainText(self.text())", "def format_text(text):\n\tftext_str = \"\" #init variable for holding formatted text\n\tftext_list = []\n\ttext_list = text.split('|')\n\ttext_key = False #doesn't start key colored\n\ttext_bold = False \n\ttext_under = False\n\ttext_blink = False\n\ttext_escape = False\n\tfor char in text:\n\t\ttext_attrs = []\n\t\ttext_color = ''\n\t\tif char == '%' and text_escape != True: #a % makes it either turn red or stop being red\n\t\t\ttext_key = not text_key #by inverting it\n\t\telif char == '*' and text_escape != True:\n\t\t\ttext_bold = not text_bold\n\t\telif char == '_' and text_escape != True:\n\t\t\ttext_under = not text_under\n\t\telif char == '@' and text_escape != True:\n\t\t\ttext_blink = not text_blink\n\t\telif char == '`' and (text_escape != True):\n\t\t\tpass\n\t\telse:\n\t\t\tif text_key:\n\t\t\t\ttext_attrs += ['bold']\n\t\t\t\ttext_color = 'red'\n\t\t\t\tchar = char.upper()\n\t\t\tif text_bold:\n\t\t\t\ttext_attrs += ['bold']\n\t\t\tif text_under:\n\t\t\t\ttext_attrs += [\"underline\"]\n\t\t\tif text_blink:\n\t\t\t\ttext_attrs +=[\"blink\"]\n\t\t\tif text_color:\n\t\t\t\tftext_str += colored(char, text_color, attrs=text_attrs)\n\t\t\telse:\n\t\t\t\tftext_str += colored(char, attrs=text_attrs)\n\t\tif char == '`' or text_escape:\n\t\t\ttext_escape = not text_escape\n\treturn ftext_str", "def textFormats():\n log = QtGui.QTextCharFormat()\n log.setFontFamily(\"monospace\")\n \n url = QtGui.QTextCharFormat(log)\n url.setForeground(QtGui.QBrush(QtGui.QColor(\"blue\")))\n url.setFontUnderline(True)\n url.setAnchor(True)\n \n msg = QtGui.QTextCharFormat()\n msg.setFontFamily(\"sans-serif\")\n msg.setFontWeight(QtGui.QFont.Bold)\n \n msgok = QtGui.QTextCharFormat(msg)\n msgok.setForeground(QtGui.QBrush(QtGui.QColor(\"green\")))\n \n msgerr = QtGui.QTextCharFormat(msg)\n msgerr.setForeground(QtGui.QBrush(QtGui.QColor(\"red\")))\n \n return locals()", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def clean_text2(text2, project_key):", "def __edit(text, edits, doubleSlash):\n for __edit in edits:\n _outText = []\n for line in text:\n if re.search(__edit[0], line, re.IGNORECASE):\n _newLine = re.sub(__edit[0], __edit[1], line, re.IGNORECASE)\n # \\rfactor in the substitute string gets escaped to \\\\rfactor\n # fix that by replacing \\\\ with \\\n # If the string is using \\\\ replace \\\\\\\\ with \\\\\n if doubleSlash:\n _outText.append(_newLine.replace(r'\\\\\\\\', r'\\\\'))\n else:\n _outText.append(_newLine.replace(r'\\\\', '\\\\'))\n else:\n _outText.append(line)\n text = list(_outText)\n return text", "def format_args(arg_type, text):\n if arg_type == 'list':\n args_text = text.strip().split()\n elif arg_type == 'flag' and not text:\n args_text = True\n else:\n args_text = text.strip()\n return args_text", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def test_normalize_text_for_edit_plain_text_default_plain_text(self):\n user = User.objects.create_user('test', 'test@example.com')\n Profile.objects.create(user=user, default_use_rich_text=False)\n\n text = normalize_text_for_edit(user, text='&lt; \"test\" **foo**',\n rich_text=True)\n self.assertEqual(text, '&amp;lt; &quot;test&quot; **foo**')\n self.assertTrue(isinstance(text, SafeText))", "def getData(self):\n if self.revButton.isChecked():\n rev = self.revEdit.text()\n elif self.tagButton.isChecked():\n rev = self.tagCombo.currentText()\n elif self.branchButton.isChecked():\n rev = self.branchCombo.currentText()\n else:\n rev = \"HEAD\"\n \n return (rev, self.formatComboBox.currentText(),\n Utilities.toNativeSeparators(self.fileEdit.text()),\n self.prefixEdit.text()\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): return [(text, '') for text in self.formatList]
[ "def get_choices(self):\r\n return []", "def selection_field_vocab(context, widget, data):\n return [\n ('opt_1', _('opt_1', default=u'Option 1')),\n ('opt_2', _('opt_2', default=u'Option 2')),\n ('opt_3', _('opt_3', default=u'Option 3'))\n ]", "def choices_completion_item() -> List[CompletionItem]:\n items = \\\n {\n 1: \"My item\",\n 2: \"Another item\",\n 3: \"Yet another item\"\n }\n return [CompletionItem(item_id, description) for item_id, description in items.items()]", "def options(self, values):\r\n return self.get_choices()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def get_choices(self, with_empty=False):\n choices = []\n for f in OPTION_REGEX.findall(self.options):\n choices.append((f[0].strip(), f[1].strip()))\n return choices", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def get_choices(self):\n return self.choices.all()", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices_provider(self) -> List[str]:\n return self.sport_item_strs", "def get_tag_choices():\n return list(Tag.select().select(Tag.id, Tag.title).tuples())", "def choices_completion_item(self) -> List[CompletionItem]:\n fancy_item = \"These things can\\ncontain newlines and\\n\"\n fancy_item += ansi.style(\"styled text!!\", fg=ansi.Fg.LIGHT_YELLOW, underline=True)\n items = {1: \"My item\", 2: \"Another item\", 3: \"Yet another item\", 4: fancy_item}\n return [CompletionItem(item_id, description) for item_id, description in items.items()]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def generate_choices_json(self):\n result = []\n for checklist in self.plane.checklists:\n result.append(checklist.name)\n result.append('RETURN TO PLANE SELECT')\n return result", "def _get_choices(self, task):\n # Standard, built-in choices.\n choices = [\n PromptChoice('s', 'Skip',\n lambda s, t: importer.action.SKIP),\n PromptChoice('u', 'Use as-is',\n lambda s, t: importer.action.ASIS)\n ]\n if task.is_album:\n choices += [\n PromptChoice('t', 'as Tracks',\n lambda s, t: importer.action.TRACKS),\n PromptChoice('g', 'Group albums',\n lambda s, t: importer.action.ALBUMS),\n ]\n choices += [\n PromptChoice('e', 'Enter search', manual_search),\n PromptChoice('i', 'enter Id', manual_id),\n PromptChoice('b', 'aBort', abort_action),\n ]\n\n # Send the before_choose_candidate event and flatten list.\n extra_choices = list(chain(*plugins.send('before_choose_candidate',\n session=self, task=task)))\n\n # Add a \"dummy\" choice for the other baked-in option, for\n # duplicate checking.\n all_choices = [\n PromptChoice('a', 'Apply', None),\n ] + choices + extra_choices\n\n # Check for conflicts.\n short_letters = [c.short for c in all_choices]\n if len(short_letters) != len(set(short_letters)):\n # Duplicate short letter has been found.\n duplicates = [i for i, count in Counter(short_letters).items()\n if count > 1]\n for short in duplicates:\n # Keep the first of the choices, removing the rest.\n dup_choices = [c for c in all_choices if c.short == short]\n for c in dup_choices[1:]:\n log.warning(\"Prompt choice '{0}' removed due to conflict \"\n \"with '{1}' (short letter: '{2}')\",\n c.long, dup_choices[0].long, c.short)\n extra_choices.remove(c)\n\n return choices + extra_choices" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split textStr using editSep, double sep's become char
def splitText(self, textStr): return [text.strip().replace('\0', self.editSep) for text in textStr.replace(self.editSep * 2, '\0'). split(self.editSep)]
[ "def test_prepare_value_with_custom_separator(self):\n field = ListEditField(sep=';')\n\n self.assertEqual(\n field.prepare_value(' foo; bar ; baz '),\n ['foo', 'bar', 'baz'])", "def split(string, sep='\\t'):\n return text_type.split(string, sep)", "def tab2text(self, text, charsep='\\001' ):\n return ''.join([self.tab2ascii[t] for t in text.split(charsep) if t != ''])", "def split_text(text):\n\tcommands = ['#0', '#1', '#2', '#3', '#4', '#5', '#6']\n\tif text[0:2] in commands:\n\t\treturn (text[0:2], text[2:].split())\n\telse:\n\t\treturn ('#0', text.split())", "def _sedsplit(text):\n split = []\n newstring = \"\"\n indices = _positions(text, \"/\")\n lastind = 0\n for i in range(0, len(indices)):\n if text[indices[i]-1] == \"\\\\\" and text[indices[i]-2] != \"\\\\\":\n newstring += text[lastind:indices[i]-1] + \"/\"\n else:\n newstring += text[lastind:indices[i]]\n split.append(newstring)\n newstring = \"\"\n lastind = indices[i] + 1\n if text[lastind:]:\n split.append(text[lastind:])\n else:\n split.append(\"\")\n return split", "def split(self, diff_separators):\n return super().split(diff_separators)", "def extended(self, new_char, new_char_index, sep=' '):\n if new_char == sep:\n return TextState(self.text + new_char, '', new_char_index), self.last_word\n if sep == '':\n return TextState(self.text + new_char, new_char, new_char_index), self.last_word\n return TextState(self.text + new_char, self.last_word + new_char, new_char_index), None", "def mysplit(s,delims):\r\n for c in delims:\r\n s = s.replace(c,' ')\r\n return s.split()", "def test_to_python_with_custom_separator(self):\n field = ListEditField(sep=';')\n\n self.assertEqual(\n field.to_python(['foo', 'bar', 'baz']),\n 'foo;bar;baz')", "def multi_split(text, seps):\n if not seps: # split by whitespaces\n return text.split()\n else: # split by separators in `seps`\n\n ##### Topics on Stack Overflow\n # http://stackoverflow.com/questions/1059559/python-strings-split-with-multiple-separators\n\n ## Method 1: use `re.split()` (from gimel)\n return re.split(r'[%s]' % seps, text)\n\n ## Method 2: DIY (from pprzemek)\n '''\n res = [text]\n for sep in seps:\n text, res = res, []\n for s in text:\n res += s.split(sep)\n return res\n '''", "def split_on_separators(original, separators):\n \n result = [original]\n \n #For each of the separators create a temporary list. Then for each of the \n #tokens in the list of the original string in result, split at the indicated \n #separator. Then the temporary list replaces the result. \n \n for sep in separators:\n temp = []\n for fragment in result:\n temp += fragment.split(sep)\n if not temp[-1]:\n temp = temp[:-1]\n result = temp\n return result", "def split_words_and_quoted_text(text):", "def separate(delim):\n # Return a function that takes an argument s, which when called will split\n # s over the delimiter specified (i.e. the delim parameter).\n return lambda s: s.split(delim)", "def split_on_spaces(text):\n\n\ttokens = text.split()\n\treturn tokens", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def sep(self):\n self.sep_index = self.indices[self.sep_word]\n return self.sep_index", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def split(self, val_separators):\n return super().split(val_separators)", "def test_two_chars_and_separator():\n assert my_splitter(\",J\", \",\") == [\"\", \"J\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tuple of choices from inText sorted like format and True if all splits are valid and included
def sortedChoices(self, inText): choices = self.splitText(inText) sortedChoices = [text for text in self.formatList if text in choices] if len(choices) == len(sortedChoices): return (sortedChoices, True) else: return (sortedChoices, False)
[ "def complete_opt_allow_select_scan(self, text, *_):\n return [t for t in (\"true\", \"false\", \"yes\", \"no\") if t.startswith(text.lower())]", "def _determine_guess(\n sentences: List[List[Literal]]) -> Tuple[bool, Tuple[str, bool]]:\n literals = [x[0] for x in sentences if len(x) == 1]\n if len(literals) != 0:\n literals.sort(key=lambda x: x.atom)\n selected = literals[0]\n if selected.negation:\n return [True, [selected.atom, False]]\n return [True, [selected.atom, True]]\n atoms = [atom for atom in chain.from_iterable(sentences)]\n atoms.sort(key=lambda x: x.atom)\n selected = atoms[0]\n return [False, [selected.atom, True]]", "def parse(question):\n # Handle things like \"should ___ X or Y\"\n if question.lower().startswith('should'):\n question = ' '.join(question.split()[2:])\n\n question = question.strip('?')\n # split on both ',' and ' or '\n choices = question.split(',')\n choices = sum((c.split(' or ') for c in choices), [])\n # Get rid of empty strings\n choices = filter(bool, (c.strip() for c in choices))\n return choices", "def can_process(self, statement):\r\n set1 = ['sweet', 'room']\r\n set2 = ['delux', 'room']\r\n set3 = ['condo', 'room']\r\n\r\n if all(x in statement.text.split() for x in set1):\r\n return True\r\n elif all(x in statement.text.split() for x in set2):\r\n return True\r\n elif all(x in statement.text.split() for x in set3):\r\n return True\r\n else:\r\n return False", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def detect_splitwords():\n\n # SPLITWORD RULES\n\n # RULE 1: splitwords starting with 'un'\n # Exceptions 'un' ADJA: unerwarterer, unglücklichen, unerschütterlichen\n # Exceptions 'un' ADJD: ungewöhnlicher\n if t_word[:2] == 'un' and (t_pos in UN_AUS_RULES_POS_TAGS):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])\n\n # RULE 2: splitwords with 'außerordentlich'\n if t_word[:15] == 'außerordentlich' and (t_pos in UN_AUS_RULES_POS_TAGS):\n create_splitword_tags(t_word[:5], t_word[5:])\n create_negation_frame()\n create_splitword_target(t_word[:5])\n create_splitword_focus(t_word[5:])\n create_splitword_negated(t_word[5:])\n create_splitword_scope(t_word[5:])\n\n # RULE 3: splitwords ending with 'los'\n # Exceptions: Some Focus Exceptions: 'zweifellos ADJD', 'ratlos ADJD'\n if t_word[-3:] == 'los':\n create_splitword_tags(t_word[:-3], t_word[-3:])\n create_negation_frame()\n create_splitword_target(t_word[-3:])\n create_splitword_focus(t_word[:-3])\n create_splitword_negated(t_word[:-3])\n create_splitword_scope(t_word[:-3])\n\n # RULE 4: splitwords ending with 'lose', or 'frei'\n if t_word[-4:] == 'lose' or t_word[-4:] == 'frei':\n create_splitword_tags(t_word[:-4], t_word[-4:])\n create_negation_frame()\n create_splitword_target(t_word[-4:])\n create_splitword_focus(t_word[:-4])\n create_splitword_negated(t_word[:-4])\n create_splitword_scope(t_word[:-4])\n\n # RULE 5: splitwords ending with 'loser|s|n'\n if t_word[-5:-1] == 'lose':\n create_splitword_tags(t_word[:-5], t_word[-5:])\n create_negation_frame()\n create_splitword_target(t_word[-5:])\n create_splitword_focus(t_word[:-5])\n create_splitword_negated(t_word[:-5])\n create_splitword_scope(t_word[:-5])", "def check_series(text_list, set_list):\n in_list = []\n for word in text_list:\n all_words = re.sub('\\(.*?\\)', ',', word).split(',')\n all_words = list(filter(None, all_words))\n component_in_list = [component.strip(' ') in set_list for component in all_words]\n this_word_in_list = all(component_in_list)\n in_list.append(this_word_in_list)\n return in_list", "def check_order(self, filename: str, section: str, texts: List[str]):\n alphas = sorted(texts, key=lambda x: x.split(':')[0].lower())\n if texts == alphas:\n return\n for text, alpha in zip(texts, alphas):\n if text != alpha:\n print(f'{filename}: {section}: {text} vs {alpha}')\n break", "def splitInPhrase(self,text,unit=\"string\"):\n aux = []\n rs = []\n\n text = text.lower()\n tokens = self.string2Token(text)\n for tk in tokens:\n if (tk[-1] is \".\") or (tk[-1] is \",\") or (tk[-1] is \"?\") or (tk[-1] is \"!\"):\n if tk in self.abbreviation:\n aux.append(tk)\n else:\n aux.append(tk)\n rs.append(self.token2String(aux))\n aux = []\n else:\n aux.append(tk)\n\n return rs", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def is_contraction(text):\n return text in [\"ll\", \"ve\"]", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def test__parse_choices():\n choice_0 = ApplicationCommandOptionChoice('suika')\n choice_1 = ApplicationCommandOptionChoice('suwako')\n \n for input_data, expected_output in (\n ({}, None),\n ({'choices': None}, None),\n ({'choices': []}, None),\n ({'choices': [choice_0.to_data()]}, (choice_0,)),\n ({'choices': [choice_0.to_data(), choice_1.to_data()]}, (choice_0, choice_1)),\n ):\n output = parse_choices(input_data)\n vampytest.assert_eq(output, expected_output)", "def IsValid(self):\n return (TickerFull.DelimiterSplit not in self.Text) and (TickerDataType.DelimiterData in self.Text)", "def get_choices(self, with_empty=False):\n choices = []\n for f in OPTION_REGEX.findall(self.options):\n choices.append((f[0].strip(), f[1].strip()))\n return choices", "def parse_texts(inTxtLst, Flag):\n if Flag == \"CN\":\n retTxtLst=[]\n texts = []\n id2text = {}\n id = 0\n for line in inTxtLst:\n text = remove_punctuation(line)\n a = nlp.tag(text)[0]\n res = []\n for i in range(len(a['word'])):\n if a['tag'][i] not in POS2filter:\n res.append(a['word'][i])\n id2text[id] = line\n texts.append(res)\n id += 1\n if id % 100 == 0:\n print(id)\n print(\"finished parsing\")\n texts = remove_stop_words(texts, stopwordspath)\n return texts, id2text\n elif Flag == \"EN\":\n retTxtLst=[]\n texts = []\n id2text = {}\n id = 0\n for line in inTxtLst:\n res = preprocess_string(line)\n id2text[id] = line\n texts.append(res)\n id += 1\n if id % 100 == 0:\n print(id)\n print(\"finished parsing\")\n return texts, id2text", "def match(self, text):\n root = self.root\n if isinstance(root, str):\n # root 是 str 的情况下,才能将 text 拆分成 left、right\n if self.use_re:\n if not re.search(self.root, text):\n return False\n left, right = re.split(self.root, text, 1)\n else:\n if self.root not in text:\n return False\n left, right = text.split(self.root, 1)\n\n if self.has and len(self.has) > 0 and not self._features_in(self.has, text):\n return False\n\n if self.left_has and len(self.left_has) > 0 and not self._features_in(self.left_has, left):\n return False\n\n if self.right_has and len(self.right_has) > 0 and not self._features_in(self.right_has, right):\n return False\n\n if self.without and len(self.without) > 0 and self._features_in(self.without, text):\n return False\n\n if self.left_without and len(self.left_without) > 0 and self._features_in(self.left_without, left):\n return False\n\n if self.right_without and len(self.right_without) > 0 and self._features_in(self.right_without, right):\n return False\n\n return True\n elif isinstance(root, list):\n if not self._features_in(root, text):\n return False\n\n # 对 has 进行判断\n if self.has and len(self.has) > 0 and not self._features_in(self.has, text):\n return False\n\n # 对 without 进行判断\n if self.without and len(self.without) > 0 and self._features_in(self.without, text):\n return False\n return True\n else:\n raise ValueError('`root` must be instance of str or list')", "def meets_generial_criterias(line):\n\n return is_line_long_enought(line) and is_line_in_english(line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of choices for setting the init default
def initDefaultChoices(self): return [entry[0] for entry in self.getEditChoices()]
[ "def initDefaultChoices(self):\n return []", "def get_choices(self):\r\n return []", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def options(self, values):\r\n return self.get_choices()", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get(self) -> List:\n return lizardspock.choices", "def _getImportChoices(self):\n return ['auto', 'xmipp','relion', 'eman', 'dogpicker']", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def get_choices(self):\n return self.choices.all()", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def _get_field_choices(bottle):\n default_choice = ('', '-- select a field --')\n field_choices = bottle.get_field_choices()\n\n choices = [default_choice]\n choices.extend(field_choices)\n\n return choices", "def default_value_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]]:\n return pulumi.get(self, \"default_value_list\")", "def set_choices(self, **kwargs):\n raise NotImplementedError()", "def choices(self) -> List[Choice]:\n return self._choices", "def choices(self):\n if self._choices:\n return self._choices\n\n self._choices = Request(\n base=\"{}/{}/_choices/\".format(self.api.base_url, self.name),\n token=self.api.token,\n private_key=self.api.private_key,\n http_session=self.api.http_session,\n ).get()\n\n return self._choices", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def get_choices(self, with_empty=False):\n choices = []\n for f in OPTION_REGEX.findall(self.options):\n choices.append((f[0].strip(), f[1].strip()))\n return choices", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort menu list choices
def sortChoices(self): self.formatList.sort()
[ "def _buildSortMenu(cls):\r\n res = [(elt[0], elt[1]) for elt in cls._SORT_OPTIONS]\r\n cls._SORT_MENU = [('relevance', 'relevance')] + res", "def choice_sort(A):\n pass", "def sort(self):\n self.BoxList.sort(key=lambda x: x.id_, reverse=False)", "def shell_sort(input_list):", "def application_command_autocomplete_choice_sort_key(choice):\n return choice['name']", "def sort_list():\n temp_list = list(listbox1.get(0, tk.END))\n temp_list.sort(key=str.lower)\n # delete contents of present listbox\n listbox1.delete(0, tk.END)\n # load listbox with sorted data\n for item in temp_list:\n listbox1.insert(tk.END, item)", "def listSortCriteriaTypes():", "def sort_option_names(options):\n return sorted(options, key=lambda x: (x == 'write-in:', x))", "def sort_place(self, *args):\r\n self.place_list.sort(self.spinner.text)\r\n self.root.ids.right_panel.clear_widgets()\r\n self.right_panel_widgets()", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def sort_options(self):\n return (\n SortOption(\n self.request, _(u'date (newest first)'),\n 'Date', reverse=True\n ),\n SortOption(self.request, _(u'alphabetically'), 'sortable_title'),\n )", "def sort_songs(self, *args):\n self.song_list.sort(self.spinner.text)\n self.root.ids.rightLayout.clear_widgets()\n self.build_right_layout()", "def choices(item_list, sort=False):\n if sort:\n item_list.sort()\n return tuple((i, i) for i in item_list)", "def enableSort(self):", "def sort(self):\n for section, section_items in self.items():\n if sorted(section_items) == list(section_items):\n continue\n\n section_dict = {k: v for k, v in section_items.items()}\n\n for k in list(section_items):\n self.remove_option(section, k)\n\n for k, v in sorted(section_dict.items()):\n self.set(section, k, v)", "def prep_ctn_nuclei_sort(self):\n # selection to orders\n self.sel_nuclei_sort = QtGui.QComboBox()\n\n # cycle through criteria for sorting\n for criteria in cfg.filter_criteria_nuclei:\n self.sel_nuclei_sort.addItem(criteria)\n\n # add listener\n self.sel_nuclei_sort.activated[str].connect(self.sort_nuclei_examples)\n\n # create container\n container = QtGui.QGridLayout()\n\n container.addWidget(QtGui.QLabel(gui_labels.label_sort), 0, 0)\n container.addWidget(self.sel_nuclei_sort, 0, 1)\n container.setColumnStretch(2, 1)\n\n return container", "def sort_by_type(self):\n # sort_by_type_sitem = self.locator_finder_by_idx(self.sort_by_type_id, 30)\n # sort_by_type_sitem = sort_by_type_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_type_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1) \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n sort_by_type = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[3]/a/label'\n sort_by_type_sitem = self.locator_finder_by_xpath(sort_by_type)\n else:\n sort_by_type_sitem = self.locator_finder_by_xpath(self.sort_by_type_id)\n\n sort_by_type_sitem.click()\n time.sleep(2)", "def sortOptions(self):\n return list(self.sort) if self.sort is not None else []", "def sort_options(command):\n command.params.sort(key=lambda p: p.name)\n return command" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set initial value from editor version using edit format option
def setInitDefault(self, editText): if editText in DateFormat.dateStampStrings: self.initDefault = DateFormat.dateStampStrings[0] else: TextFormat.setInitDefault(self, editText)
[ "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def setModelData(self, editor, model, index):\n try:\n date = datetime.strptime(str(editor.text()), self.format)\n model.setData(index, date, Qt.EditRole)\n except:\n pass # If the text does not conform to the date format, do nothing.", "def default_revision(self):", "def initFormat(self):\n pass", "def set_default_format(cls, fmt: FormatStr | core.QSettings.Format):\n cls.setDefaultFormat(FORMAT.get_enum_value(fmt))", "def on_editor_change(self, event):\n\n dlg = EditorDialog(self, self.editor)\n dlg.ShowModal()\n self.editor = dlg.get_editor()\n Settings.set_editor(self.editor)\n self.m_editor_text.SetValue(\" \".join(self.editor) if len(self.editor) != 0 else \"\")\n dlg.Destroy()\n event.Skip()", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def _set_settings_version(c, settings_path, version_line):\n version_const = \"VERSION\"\n\n print(f\"Adjusting {version_const} in {settings_path} to {version_line}...\")\n c.run(f'sed -i .orig \\'s/^{version_const} =.*$/{version_const} = \"{version_line}\"/\\' \"{settings_path}\"')", "def readVersion(self):\n ds = self.root.findall(\"[@format]\")[0]\n raw_format = ds.attrib['format']\n try:\n self.documentFormatVersion = int(raw_format)\n except ValueError:\n # as of fontTools >= 3.27 'format' is formatted as a float \"4.0\"\n self.documentFormatVersion = float(raw_format)", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def testSetEditorValue(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n self._editorFactory.setEditorValue(lineEdit, u\"Test\")\r\n self.assertTrue(lineEdit.text() == u\"Test\" )\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n self._editorFactory.setEditorValue(spinBox, 2.05)\r\n self.assertTrue(spinBox.value() == 2.05)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n self._editorFactory.setEditorValue(checkBox, True)\r\n self.assertTrue(checkBox.isChecked() == True)", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def make_edited(self, value):\n if value:\n import acm\n reddish = acm.UX().Colors().Create(195, 31, 0)\n self.w_notice.SetColor(\"Text\", reddish)\n self.w_notice.SetData(\"edited\")\n self.w_ok.Label(\"Save\")\n self.w_text_object_subtype.Editable(True)\n self.w_text_object_text.Editable(True)\n self.edited = True\n else:\n self.w_notice.SetData(\"\")\n self.w_ok.Label(\"Edit\")\n self.w_text_object_subtype.Editable(False)\n self.w_text_object_text.Editable(False)\n self.edited = False", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def format_version(self, format_version):\n\n self._format_version = format_version", "def setEditor(key, value, prefClass=Prefs):\n if key == \"TabIndentOverride\":\n prefClass.settings.setValue(\"Editor/\" + key, json.dumps(value))\n else:\n prefClass.settings.setValue(\"Editor/\" + key, value)", "def _set_format_code(self, format_code = FORMAT_GENERAL):\n self._format_code = format_code\n self._format_index = self.builtin_format_id(format = format_code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return initial value in edit format, found in edit format option
def getEditInitDefault(self): if self.initDefault in DateFormat.dateStampStrings: return DateFormat.dateStampStrings[1] return TextFormat.getEditInitDefault(self)
[ "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def value(self):\n if not self.form.is_bound:\n data = self.form.initial.get(self.name, self.field.initial)\n if callable(data):\n data = data()\n # If this is an auto-generated default date, nix the\n # microseconds for standardized handling. See #22502.\n if (isinstance(data, (datetime.datetime, datetime.time)) and\n not getattr(self.field.widget, 'supports_microseconds', True)):\n data = data.replace(microsecond=0)\n else:\n data = self.field.bound_data(\n self.data, self.form.initial.get(self.name, self.field.initial)\n )\n return self.field.prepare_value(data)", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def value(value_or_editable_value):\n if isinstance(value_or_editable_value, EditableValue):\n return value_or_editable_value.initial_value\n else:\n return value_or_editable_value", "def get_initial_value (self):\n\n return None", "def _get_format_code(self):\n return self._format_code", "def GetDefaultFormat(self, p_int, p_int_1, bool):\n ...", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def default_format(self):\n return next(itervalues(self.formats))", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def get_format ( self, object, trait, row, column ):\n if column in [0, 1, 2]:\n a = self.format\n b = '%3d'\n self.format = b\n c = self._result_for( 'get_format', object, trait, row, column )\n self.format = a\n return c\n else:\n return self._result_for( 'get_format', object, trait, row, column )", "def get_value(self):\n # Get the value of the text in the line edit.\n print \"The value of the line edit is %s\" % self.some_le.text()", "def GetDefaultInternalFormat(self, p_int, p_int_1, bool):\n ...", "def get_default_format(cls) -> FormatStr:\n return FORMAT.inverse[cls.defaultFormat()]", "def template_format(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_format\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value): if value.startswith('now'): return repr(GenDate()) return value
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenTime())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def query(self, current_time):\n if (current_time < self.a):\n return self.a\n else:\n return -1", "def compare(self, dt1, dt2): # real signature unknown; restored from __doc__\n return 0", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def time_if():\n now = dt_util.now()\n if before is not None:\n time = dt_util.parse_time_str(before)\n if time is None:\n return False\n\n before_point = now.replace(hour=time.hour, minute=time.minute)\n\n if now > before_point:\n return False\n\n if after is not None:\n time = dt_util.parse_time_str(after)\n if time is None:\n return False\n\n after_point = now.replace(hour=time.hour, minute=time.minute)\n\n if now < after_point:\n return False\n\n if weekday is not None:\n now_weekday = WEEKDAYS[now.weekday()]\n\n if isinstance(weekday, str) and weekday != now_weekday or \\\n now_weekday not in weekday:\n return False\n\n return True", "def time_condition(self):\n return self._time_condition", "def check(self, comparison, value, zone, second_value=None):\n # We store temperature as milli-celsius in the database\n temp = get_temp(zone) / 1000\n value = float(value)\n if comparison == \"LE\":\n return temp <= value\n elif comparison == \"E\":\n return temp == value\n elif comparison == \"GE\":\n return temp >= value\n elif comparison == \"IN\":\n return temp >= value and temp <= second_value\n elif comparison == \"OUT\":\n return temp <= value or temp >= second_value", "def get_time_constant(self):\n # noinspection SpellCheckingInspection\n return 'OFLT?'", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def get_next_fire_time(self, previous_fire_time, now):", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def get_true_time(trace, when):\n if when == 'o' or when == 'O':\n return trace.stats.starttime\n elif when == 'p' or when == 'P':\n return trace.stats.starttime + trace.stats.sac.a\n elif when == 's' or when == 'S':\n return trace.stats.starttime + trace.stats.sac.t0\n elif type(when) == float:\n return trace.stats.starttime + when\n else:\n raise Exception(\"Not valid type.\")", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def comparison(self) -> Optional[pulumi.Input['MetricThresholdComparison']]:\n return pulumi.get(self, \"comparison\")", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value): if value.startswith('now'): return repr(GenTime()) return value
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenDate())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def query(self, current_time):\n if (current_time < self.a):\n return self.a\n else:\n return -1", "def compare(self, dt1, dt2): # real signature unknown; restored from __doc__\n return 0", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def time_if():\n now = dt_util.now()\n if before is not None:\n time = dt_util.parse_time_str(before)\n if time is None:\n return False\n\n before_point = now.replace(hour=time.hour, minute=time.minute)\n\n if now > before_point:\n return False\n\n if after is not None:\n time = dt_util.parse_time_str(after)\n if time is None:\n return False\n\n after_point = now.replace(hour=time.hour, minute=time.minute)\n\n if now < after_point:\n return False\n\n if weekday is not None:\n now_weekday = WEEKDAYS[now.weekday()]\n\n if isinstance(weekday, str) and weekday != now_weekday or \\\n now_weekday not in weekday:\n return False\n\n return True", "def time_condition(self):\n return self._time_condition", "def check(self, comparison, value, zone, second_value=None):\n # We store temperature as milli-celsius in the database\n temp = get_temp(zone) / 1000\n value = float(value)\n if comparison == \"LE\":\n return temp <= value\n elif comparison == \"E\":\n return temp == value\n elif comparison == \"GE\":\n return temp >= value\n elif comparison == \"IN\":\n return temp >= value and temp <= second_value\n elif comparison == \"OUT\":\n return temp <= value or temp >= second_value", "def get_time_constant(self):\n # noinspection SpellCheckingInspection\n return 'OFLT?'", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def get_next_fire_time(self, previous_fire_time, now):", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def get_true_time(trace, when):\n if when == 'o' or when == 'O':\n return trace.stats.starttime\n elif when == 'p' or when == 'P':\n return trace.stats.starttime + trace.stats.sac.a\n elif when == 's' or when == 'S':\n return trace.stats.starttime + trace.stats.sac.t0\n elif type(when) == float:\n return trace.stats.starttime + when\n else:\n raise Exception(\"Not valid type.\")", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def comparison(self) -> Optional[pulumi.Input['MetricThresholdComparison']]:\n return pulumi.get(self, \"comparison\")", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): try: return (repr(GenBoolean(editText)), True) except GenBooleanError: if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired)
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def _text_checkbox(self) -> Tuple[str, str, int, int]:\n w = ('bold' if self.weight_var.get() else 'normal')\n s = ('italic' if self.slant_var.get() else 'roman')\n u = (1 if self.underline_var.get() else 0)\n o = (1 if self.overstrike_var.get() else 0)\n ww = (w, s, u, o,)\n return ww", "def ge(self):\r\n return(''.join(self.editedText))", "def formatter(self):\n want_edit = str(self.ctype_box.currentText())\n default_value = self.data.get(want_edit, \"\")\n self.value_box.setText(default_value)", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def determineReprocessText():\n result = False\n \n #read in the file modification list and compare modified times\n try:\n with open(TEXT_CHANGED_FILENAME, \"r\") as fin:\n for line in fin:\n line = line.strip()\n data = line.split(\",\")\n if int(data[0]) != int(os.path.getmtime(data[1])):\n result = True\n break\n except IOError:\n result = True\n \n return result", "def updateText(self):\n self.errorFlag = False\n try:\n self.setText(self.field.editorText(self.node))\n except ValueError as err:\n if len(err.args) >= 2:\n self.setText(err.args[1])\n else:\n self.setText(self.node.data.get(self.field.name, ''))\n self.errorFlag = True\n if self.field.showRichTextInCell:\n self.doc.setHtml(self.text())\n else:\n self.doc.setPlainText(self.text())", "def format_text(text):\n\tftext_str = \"\" #init variable for holding formatted text\n\tftext_list = []\n\ttext_list = text.split('|')\n\ttext_key = False #doesn't start key colored\n\ttext_bold = False \n\ttext_under = False\n\ttext_blink = False\n\ttext_escape = False\n\tfor char in text:\n\t\ttext_attrs = []\n\t\ttext_color = ''\n\t\tif char == '%' and text_escape != True: #a % makes it either turn red or stop being red\n\t\t\ttext_key = not text_key #by inverting it\n\t\telif char == '*' and text_escape != True:\n\t\t\ttext_bold = not text_bold\n\t\telif char == '_' and text_escape != True:\n\t\t\ttext_under = not text_under\n\t\telif char == '@' and text_escape != True:\n\t\t\ttext_blink = not text_blink\n\t\telif char == '`' and (text_escape != True):\n\t\t\tpass\n\t\telse:\n\t\t\tif text_key:\n\t\t\t\ttext_attrs += ['bold']\n\t\t\t\ttext_color = 'red'\n\t\t\t\tchar = char.upper()\n\t\t\tif text_bold:\n\t\t\t\ttext_attrs += ['bold']\n\t\t\tif text_under:\n\t\t\t\ttext_attrs += [\"underline\"]\n\t\t\tif text_blink:\n\t\t\t\ttext_attrs +=[\"blink\"]\n\t\t\tif text_color:\n\t\t\t\tftext_str += colored(char, text_color, attrs=text_attrs)\n\t\t\telse:\n\t\t\t\tftext_str += colored(char, attrs=text_attrs)\n\t\tif char == '`' or text_escape:\n\t\t\ttext_escape = not text_escape\n\treturn ftext_str", "def textFormats():\n log = QtGui.QTextCharFormat()\n log.setFontFamily(\"monospace\")\n \n url = QtGui.QTextCharFormat(log)\n url.setForeground(QtGui.QBrush(QtGui.QColor(\"blue\")))\n url.setFontUnderline(True)\n url.setAnchor(True)\n \n msg = QtGui.QTextCharFormat()\n msg.setFontFamily(\"sans-serif\")\n msg.setFontWeight(QtGui.QFont.Bold)\n \n msgok = QtGui.QTextCharFormat(msg)\n msgok.setForeground(QtGui.QBrush(QtGui.QColor(\"green\")))\n \n msgerr = QtGui.QTextCharFormat(msg)\n msgerr.setForeground(QtGui.QBrush(QtGui.QColor(\"red\")))\n \n return locals()", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def clean_text2(text2, project_key):", "def __edit(text, edits, doubleSlash):\n for __edit in edits:\n _outText = []\n for line in text:\n if re.search(__edit[0], line, re.IGNORECASE):\n _newLine = re.sub(__edit[0], __edit[1], line, re.IGNORECASE)\n # \\rfactor in the substitute string gets escaped to \\\\rfactor\n # fix that by replacing \\\\ with \\\n # If the string is using \\\\ replace \\\\\\\\ with \\\\\n if doubleSlash:\n _outText.append(_newLine.replace(r'\\\\\\\\', r'\\\\'))\n else:\n _outText.append(_newLine.replace(r'\\\\', '\\\\'))\n else:\n _outText.append(line)\n text = list(_outText)\n return text", "def format_args(arg_type, text):\n if arg_type == 'list':\n args_text = text.strip().split()\n elif arg_type == 'flag' and not text:\n args_text = True\n else:\n args_text = text.strip()\n return args_text", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def test_normalize_text_for_edit_plain_text_default_plain_text(self):\n user = User.objects.create_user('test', 'test@example.com')\n Profile.objects.create(user=user, default_use_rich_text=False)\n\n text = normalize_text_for_edit(user, text='&lt; \"test\" **foo**',\n rich_text=True)\n self.assertEqual(text, '&amp;lt; &quot;test&quot; **foo**')\n self.assertTrue(isinstance(text, SafeText))", "def getData(self):\n if self.revButton.isChecked():\n rev = self.revEdit.text()\n elif self.tagButton.isChecked():\n rev = self.tagCombo.currentText()\n elif self.branchButton.isChecked():\n rev = self.branchCombo.currentText()\n else:\n rev = \"HEAD\"\n \n return (rev, self.formatComboBox.currentText(),\n Utilities.toNativeSeparators(self.fileEdit.text()),\n self.prefixEdit.text()\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the next value for a new node, increment format if increment is True
def nextValue(self, increment=True): try: prefix, numText, suffix = UniqueIDFormat.formatRe.\ match(self.format).groups() except AttributeError: self.format = UniqueIDFormat.defaultFormat return self.nextValue(increment) value = self.format if increment: pattern = u'%%s%%0.%dd%%s' % len(numText) num = int(numText) + 1 self.format = pattern % (prefix, num, suffix) return value
[ "def get_next(self): \n return self.nextval", "def _calc_next(self):\n d = self.interval\n self.next = d + int(time.time() / d)*d", "def next_node(self):\r\n # always increment to try and generate unique ids\r\n self.node_counter += 1\r\n # ... but also check node is valid\r\n while self.node_counter in self.nodes:\r\n self.node_counter += 1\r\n return self.node_counter", "def get_next(node):\n return node['next']", "def set_next(node, value):\n node['next'] = value", "def next_child(current, separator='.', default='1', alphabet=alphabet, **kw):\n\n if not validate_index(current, separator=separator): return default\n\n current = str(current)\n o = current.split(separator)[-1]\n if is_integer(o):\n value = alphabet[0]\n else:\n value = '1'\n\n return current + separator + value", "def get_next(node, offset):\n row, column = node\n row_offset, column_offset = offset\n return row + row_offset, column + column_offset", "def increment_node_index(self):\n self.node_index += 1", "def go_next(self):\n # update current as the next node\n self.current = self.current.next_node", "def get_next(self):\n return self.cur_node.next.data", "def next(self, delta=1):\n return Prufer.unrank(self.rank + delta, self.nodes)", "def next(self):\n if self._next_value:\n value = self._next_value[0]\n self._next_value = None\n return value\n \n return next(self.inner)", "def next(self):\n self.currentIndex += 1\n if self.currentIndex < len(values):\n result = 0\n else:\n result = 1\n self.currentIndex = 0\n return result", "def next_gen(self):\n self.current_gen += 1", "def increment(self):\n self.value = self + 1", "def next_line(self):\n\n self.current_line += 1\n\n return self.current_label()", "def next(self):\n\t\tif (self.current is None):\n\t\t\treturn None\n\t\t\n\t\tif (self.current == []):\n\t\t\tself.current = range(1, self.min + 1)\n\t\telse:\n\t\t\tself.current = Pattern_maker.increment(self.current)\n\t\t\n\t\twhile (not Pattern_maker.contains_relative(self.current, self.pat, self.rotate)):\n\t\t\tself.current = Pattern_maker.increment(self.current)\n\t\t\tif (self.current == None):\n\t\t\t\tif (self.min < self.max):\n\t\t\t\t\tself.min += 1\n\t\t\t\t\tself.current = range(1, self.min + 1)\n\t\t\t\telse:\n\t\t\t\t\treturn None\n\t\t\n\t\treturn self.current", "def _get_next_nodes(self):\n next_nodes = self.data[5] if not is_nan(self.data[5]) else \"eos\"\n if is_nan(next_nodes):\n next_nodes = \"eos\"\n return next_nodes", "def nextValue(self):\n return self.nextLong(self.items)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return formatted text, properly escaped and with a link to the picture if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): if titleMode: return TextFormat.formatOutput(self, storedText, titleMode, internal) paths = storedText.split('\n') results = ['<img src="%s">' % escape(url, treedoc.escDict) for url in paths] return u'<br />'.join(results)
[ "def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html", "def image_title(self, text):\n \n self.title.text = text\n title_pos = (0, 0-self.size[0]/2.0*self.cfY-self.msgHeight)\n self.title.pos = title_pos", "def format_message(self, imageUrl, imageTitle):\n if imageTitle != \"\":\n message = imageTitle + \" [\" + imageUrl + \"]\"\n else:\n message = imageUrl\n\n return message", "def caption(picture):\n if hasattr(picture, 'name') and hasattr(picture, 'description'):\n return '<a href=\"%s\">%s</a><p>%s</p>' % \\\n (picture.get_absolute_url(), picture.name, picture.description or '')\n else:\n return ''", "def header_link(title):\n # This doesn't handle multiple titles with the same text in the\n # same file, but usually that's not a problem. GitHub makes\n # links like the-title, the-title-1, the-title-2 etc.\n result = ''\n for character in title:\n if character in string.whitespace:\n result += '-'\n elif character in string.punctuation:\n pass\n else:\n result += character.lower()\n return result", "def __repr_title(self):\n return (\n self.title if not self.done\n else '̶'.join(c for c in self.title)\n )", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, size=3):\n return '<h%s>%s</h%s>' % (size, text, size)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def get_title(text, uuid=None):\n title = pn.Row(pn.pane.Markdown('#### ' + text), align='start')\n\n if uuid is not None:\n title.append(provenance_link(uuid))\n\n return title", "def header_link(title):\n # This doesn't do the-title-1, the-title-2 etc. with multiple titles\n # with same text, but usually this doesn't matter.\n result = ''\n for character in title:\n if character in string.whitespace:\n result += '-'\n elif character in string.punctuation:\n pass\n else:\n result += character.lower()\n return result", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def processed_text(self):\n if self.caption is None:\n return \"\"\n else:\n text = re.sub('[\\n\\r]', ' ', self.caption)\n return text", "def __display_title(book):\n\n out = book[\"title\"]\n if book.get(\"subtitle\"):\n out += f\": {book['subtitle']}\"\n if book.get(\"volume_number\") and book.get(\"fascicle_number\"):\n out += f\" (vol. {book['volume_number']['raw']}; fas. {book['fascicle_number']['raw']})\"\n elif book.get(\"volume_number\"):\n out += f\" (vol. {book['volume_number']['raw']})\"\n elif book.get(\"fascicle_number\"):\n out += f\" (fas. {book['fascicle_number']['raw']})\"\n if book.get(\"volume_title\"):\n out += f\" / {book['volume_title']}\"\n return out", "def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def markdown_item(title, url):\n print('* [{0}]({1})'.format(\n markdown_escape(title),\n markdown_escape(url),\n ))", "def draw_title(self):\r\n return mark_safe(('&nbsp;&nbsp;' * self.tree_path.count('/')) + self.title)", "def _get_title_tag(self, item):\n tag = '<{heading}><a href=\"{href}\">{title}</a></{heading}>'\n if self._field_is_visible(\"title\"):\n tile_conf = self.get_tile_configuration()\n title_conf = tile_conf.get(\"title\", None)\n if title_conf:\n heading = title_conf.get(\"htmltag\", \"h2\")\n href = item.absolute_url()\n title = item.Title()\n return tag.format(heading=heading, href=href, title=title)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interpolates between two vectors that are nonzero and don't both lie on a line going through origin. First normalizes v2 to have the same norm as v1. Then interpolates between the two vectors on the hypersphere.
def interpolate_hypersphere(v1, v2, num_steps): v1_norm = tf.norm(v1) v2_norm = tf.norm(v2) v2_normalized = v2 * (v1_norm / v2_norm) vectors = [] for step in range(num_steps): interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1) interpolated_norm = tf.norm(interpolated) interpolated_normalized = interpolated * (v1_norm / interpolated_norm) vectors.append(interpolated_normalized) return tf.stack(vectors)
[ "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def distanceOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return 0\n res = res[None, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)[0]", "def __min_norm_2(self, v1_v1, v1_v2, v2_v2):\n if v1_v1 <= v1_v2:\n return 1.0\n if v2_v2 <= v1_v2:\n return 0.0\n # calculate and return alpha\n return (v2_v2 - v1_v2) / (v1_v1 + v2_v2 - 2 * v1_v2)", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def _normalize(self, v1: np.ndarray, v0: np.ndarray):\n return (v1 - v0) / np.abs(v0)", "def make_q(v0, v2):\n return (v0.y - v2.y)/(v0.x - v2.x)", "def interpolatePoint(pt1, pt2, v):\n (xa, ya), (xb, yb) = pt1, pt2\n if not isinstance(v, tuple):\n xv = v\n yv = v\n else:\n xv, yv = v\n return xa + (xb - xa) * xv, ya + (yb - ya) * yv", "def proj(v1, v2):\n return np.dot(v1, v2)/np.linalg.norm(v2)", "def project_along_line(vertex1, vertex2, point):\n vertex2, vertex1, point = map(np.array, (vertex2, vertex1, point))\n unit_vect = normalize(vertex2 - vertex1)\n return vertex1 + unit_vect * np.dot(point - vertex1, unit_vect)", "def zot_sphere_inverse(x, y):\r\n pass", "def vincenty(lat1, lon1, lat2, lon2,\n r_major=6378.1370, r_minor=6356.752314, r_sphere=None):\n lat1 = m.radians(lat1)\n lat2 = m.radians(lat2)\n lon1 = m.radians(lon1)\n lon2 = m.radians(lon2)\n \n if (r_sphere is not None):\n r_major = r_sphere\n r_minor = r_sphere\n f = 0.0\n else:\n f = (r_major-r_minor)/r_major\n \n U1 = m.atan((1.0-f) * m.tan(lat1))\n U2 = m.atan((1.0-f) * m.tan(lat2))\n L = lon2 - lon1\n \n epsilon = 1E-12 # Accuracy (10E-12 -> ~ 0.06mm)\n max_iter = 500\n lam = L\n \n cU1 = m.cos(U1)\n cU2 = m.cos(U2)\n sU1 = m.sin(U1)\n sU2 = m.sin(U2)\n \n for i in range(max_iter):\n lam_old = lam\n sLam = m.sin(lam)\n cLam = m.cos(lam)\n sin_sig = m.sqrt((cU2*sLam)**2 + (cU1*sU2 - sU1*cU2*cLam)**2)\n cos_sig = sU1*sU2 + cU1*cU2*cLam\n sig = m.atan2(sin_sig,cos_sig)\n sin_alp = (cU1*cU2*sLam) / sin_sig\n cos2_alp = 1.0 - sin_alp**2\n if (cos2_alp == 0.0):\n # equitorial line\n cos_2sigm = 100\n C = 0.0\n else:\n cos_2sigm = cos_sig - (2.0*sU1*sU2)/cos2_alp\n C = f/16.0 * cos2_alp * (4.0 + f*(4.0-3.0*cos2_alp))\n lam = L + (1.0 - C) * f * sin_alp * \\\n (sig + C * sin_sig * (cos_2sigm + C * cos_sig * \\\n (-1.0 + 2.0 * cos_2sigm**2)))\n if ((m.fabs(lam - lam_old)) <= epsilon):\n # Found a solution in i iters...\n break\n elif (i == max_iter):\n # Catch the out of iters case, never seen this.\n raise Exception(\"Failed to solve for distance\")\n \n usq = cos2_alp * ((r_major**2 - r_minor**2) / r_minor**2)\n A = 1 + usq/16384 * (4096 + usq*(-768 + usq*(320 - 175*usq)))\n B = usq/1024 * (256 + usq*(-128 + usq*(74 - 47*usq)))\n del_sig = B * sin_sig * (cos_2sigm + 0.25*B*(cos_sig*( \\\n -1 + 2*cos_2sigm**2) - (1.0/6.0)*B*cos_2sigm * ( \\\n -3 + 4*sin_sig**2) * (-3 + 4 * cos_2sigm**2)))\n s = r_minor * A * (sig - del_sig)\n alp1 = m.atan2(cU2*m.sin(lam),(cU1*sU2-sU1*cU2*m.cos(lam)))\n alp2 = m.atan2(cU1*m.sin(lam),(cU1*sU2*m.cos(lam)-sU1*cU2))\n\n return (s, m.degrees(alp1), m.degrees(alp2))", "def lineIntersection(P1,D1,P2,D2):\n P1 = asarray(P1).reshape((-1,3)).astype(float64)\n D1 = asarray(D1).reshape((-1,3)).astype(float64)\n P2 = asarray(P2).reshape((-1,3)).astype(float64)\n D2 = asarray(D2).reshape((-1,3)).astype(float64)\n N = P1.shape[0]\n # a,b,c,d\n la,a = vectorNormalize(D1)\n lb,b = vectorNormalize(D2)\n c = (P2-P1)\n d = cross(a,b)\n ld,d = vectorNormalize(d)\n # sa,sb\n a = a.reshape((-1,1,3))\n b = b.reshape((-1,1,3))\n c = c.reshape((-1,1,3))\n d = d.reshape((-1,1,3))\n m1 = concatenate([c,b,d],axis=-2)\n m2 = concatenate([c,a,d],axis=-2)\n # This may still be optimized\n sa = zeros((N,1))\n sb = zeros((N,1))\n for i in range(P1.shape[0]):\n sa[i] = linalg.det(m1[i]) / ld[i]\n sb[i] = linalg.det(m2[i]) / ld[i]\n # X\n a = a.reshape((-1,3))\n b = b.reshape((-1,3))\n X = 0.5 * ( P1 + sa*a + P2 + sb*b )\n return Coords(X)", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def distancia_punto_a_linea(x0, x1, x2):\n assert x1.shape == x2.shape == (2,)\n return fabs(cross(x0-x1, x0-x2))/norm(x2-x1)", "def interpolate2(points, v):\n\n # figure out the line passing between these points\n (first, last) = sorted(points.keys())\n dy = float(points[last] - points[first])\n dx = log2(last) - log2(first)\n intercept = float(points[first]) - (log2(first) * dy / dx)\n\n # interpolate/extrapolate our position\n x = log2(v)\n y = intercept + (x * dy / dx)\n return y", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of images, show an animation.
def animate(images): images = np.array(images) converted_images = np.clip(images * 255, 0, 255).astype(np.uint8) imageio.mimsave('./animation.gif', converted_images) return embed.embed_file('./animation.gif')
[ "def animate(self, images, delay=.25):\n for image in images:\n # Draw the image on the display buffer.\n self.set_image(image)\n\n # Draw the buffer to the display hardware.\n self.write_display()\n time.sleep(delay)", "def display_frames_as_gif(frames):\n #plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)\n display(display_animation(anim, default_mode='once'))", "def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)", "def display_frames_as_gif(frames):\n #plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)\n display(display_animation(anim, default_mode='loop'))", "def test_animation_images():\n sim = plonk.load_simulation(prefix=PREFIX, directory=DIR_PATH)\n\n snaps = [sim.snaps[0], sim.snaps[0], sim.snaps[0]]\n filename = Path('animation.mp4')\n visualize.animation_images(\n filename=filename,\n snaps=snaps,\n quantity='density',\n units={'position': 'au', 'density': 'g/cm^3'},\n adaptive_colorbar=False,\n num_pixels=(32, 32),\n )\n filename.unlink()", "def animate_frames(frames, jupyter=True, save_gif=False, path='./tmp_results/animation.gif'):\n fig, ax = plt.subplots(figsize=(12, 10))\n plt.axis('off')\n cmap = None if len(frames[0].shape) == 3 else 'Greys'\n patch = plt.imshow(frames[0], cmap=cmap)\n\n anim = animation.FuncAnimation(plt.gcf(),\n lambda x: patch.set_data(frames[x]), frames=len(frames), interval=30)\n\n if save_gif:\n writer = animation.PillowWriter(fps=25)\n anim.save(path, writer=writer)\n\n if jupyter:\n display(HTML(anim.to_jshtml())) # ipython extension\n else:\n plt.show()\n plt.close()", "def animate(tensors: Union[torch.Tensor, np.ndarray], interval=200, replay_delay=1000, file: Optional[str] = None) -> HTML: # noqa\n fig, ax = plt.subplots(figsize=(8, 8))\n\n imgs = []\n for img in tqdm.tqdm(tensors):\n if hasattr(img, \"numpy\"):\n img = img.numpy()\n\n img = ax.imshow(img, animated=True)\n imgs.append([img])\n\n ani = animation.ArtistAnimation(fig, imgs, interval=interval, blit=True, repeat_delay=replay_delay)\n if file is not None:\n ani.save(file)\n return HTML(ani.to_html5_video())", "def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True", "def blowup_animation(coord):\n for image in EXPLOSION_IMAGES: # go through the list of images in the list of pictures and play them in sequence \n #Determine the location and size to display the image\n image = pygame.transform.scale(image, (TILESIZE+10, TILESIZE+10))\n DISPLAYSURF.blit(image, coord)\n pygame.display.flip()\n FPSCLOCK.tick(EXPLOSIONSPEED) #Determine the delay to play the image with", "def applyAndShow(images, action, cmap='gray'):\n output = applyAndPack(images, action)\n showImages(output, 2, 3, (15, 13), cmap=cmap)\n return output", "def show_images(self, idxs, title):\n fig = plt.figure(figsize=(15, 3))\n \n fig_i = 1\n for i in idxs:\n img, label = self.image_dataset[i]\n img = img.numpy()\n ax = fig.add_subplot(1, len(idxs), fig_i)\n ax.axis(\"off\")\n fig.suptitle(title)\n imshow(img)\n fig_i += 1\n \n return fig", "def setAnimations(*args):", "def show_images(processed_video, unprocessed_video, arraylength, isFirst, levels, calculating_boarder, fps):\n processed_video = upsample_images(processed_video, unprocessed_video, arraylength, levels)\n if not isFirst:\n processed_video = processed_video[-calculating_boarder:]\n for image in processed_video:\n time.sleep(1/fps)\n cv2.imshow(\"colour changes pulse\", image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break", "def display_animation(anim):\n plt.close(anim._fig)\n return HTML(anim_to_html(anim))", "def show_ico_frames(self):\n\n sizes = [(16,16), (24,24), (32,32), (48,48),\n (64,64), (128,128), (255,255)]\n xCoord = 10 \n\n for frame in sizes[::-1]:\n img = self.mainImage.resize((frame[0], frame[1]), Image.ANTIALIAS)\n bmp = pil_image_to_wx_image(img)\n wx.StaticBitmap(self.mainPanel, -1,\n wx.BitmapFromImage(bmp), (xCoord, 10)) \n xCoord += frame[0] + 10\n \n self.Refresh()", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def make_animation(title, assignments):\n # Create our intermediate directory for storing the component .png images\n shutil.rmtree('source_images')\n os.mkdir('source_images')\n\n # Create district maps of each assignment\n for i in range(len(assignments)):\n a = np.array(assignments.loc[i, :])\n plot_district_map(a, save=True, savetitle='source_images/{}.png'.format(i))\n\n # Gather the appropriate paths\n image_path = Path('source_images')\n images = list(image_path.glob('*'))\n image_list = []\n for file_name in images:\n image_list.append(imageio.imread(file_name))\n\n # Write the .gif\n imageio.mimwrite('Animations/{}'.format(title), image_list)\n\n # Delete the intermediate directory source_images\n shutil.rmtree('source_images')", "def show_images(self, images, save_path=None):\n # create 1 figure with multiple subplots\n figure = plt.figure()\n rows, columns = 1, 3\n\n # enumerate through all images\n for i, (k, v) in enumerate(images.items()):\n # extract the prediction and groundtruth angles\n prediction = v['prediction']\n groundtruth = v['groundtruth']\n\n # read and resize the image\n image = cv2.imread(k, 1)\n image = cv2.resize(image, (400, 400))\n\n # draw the predicted and groundtruth angles on the image\n image = draw_on_frame(image, prediction, groundtruth, put_text=False)\n\n # add the image to a subplot\n # add the prediction and groundtruth angles as text\n figure.add_subplot(rows, columns, i+1)\n plt.axis('off')\n plt.title('Prediction: {0:.3f}\\nActual: {1:.3f}'.format(math.degrees(prediction),\n math.degrees(groundtruth)))\n plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n # save the figure with the subplots to disk\n if save_path:\n plt.savefig(save_path)\n\n plt.show()", "def show_images(self):\n\t\tself.im.show()\n\t\tself.kmeans_colorset_im.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the session token from the secret_key field.
def extract_session_from_secret(secret_key, session_token): if secret_key and '@@@' in secret_key and not session_token: return secret_key.split('@@@')[0], secret_key.split('@@@')[1] else: return secret_key, session_token
[ "def get_token_by_session(self, session_id):\n LOG.debug(\"Get token for session id {}\".format(session_id))\n return self.sessions.get_secret(session_id)", "def get_session(session_token: str):\n service_session_token= app.db.fetchone(\"\"\"SELECT * FROM service_session_token WHERE session_token=%s\"\"\",\n [session_token])\n\n return service_session_token", "def get_session_token():\n\n session_token = None\n if 'Session-Token' in request.headers or 'session_token' in session:\n session_token = request.headers.get('Session-Token')\n if not session_token:\n session_token = session.get('session_token')\n\n return session_token", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def secret_key():\n return api_settings.TOKEN_PRIVATE_KEY or api_settings.TOKEN_SECRET_KEY", "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def getSessionFromJWT(token):\n token_payload = token.split('.')[1]\n\n padded_token = token_payload + \"=\" * divmod(len(token_payload), 4)[1]\n session_data = json.loads(base64.urlsafe_b64decode(padded_token))\n\n return session_data", "def get_session_token(self, server):\n headers = {\"MMSAuth\": self.auth_token, \"MMSAuthSig\": self.auth_sig}\n url = \"https://%s/info/session/token\" % server\n session_token = self.session.get(url, headers=headers).text\n\n return session_token", "def get_aws_session_token():\n r = requests.get(\"http://169.254.169.254/latest/meta-data/iam/security-credentials/\")\n instance_profile_name = r.text\n\n r = requests.get(\"http://169.254.169.254/latest/meta-data/iam/security-credentials/%s\" % instance_profile_name)\n response = r.json()\n\n credential = {\n 'aws_access_key_id' : response['AccessKeyId'],\n 'aws_secret_access_key' : response['SecretAccessKey'],\n 'aws_session_token' : response['Token']\n }\n\n return credential", "def env_var_aws_session_token():\n return 'AWS_SESSION_TOKEN'", "def get_token_from_secret_file(secret_file_path):\n try:\n with open(secret_file_path, \"r\") as f:\n return f.readline()\n except FileNotFoundError:\n raise BaseSpaceDownloadError(\"Secret file not found\")\n except PermissionError:\n raise BaseSpaceDownloadError(\"No permissions to read secret file\")", "def access_token_secret(self) -> Any:\n return pulumi.get(self, \"access_token_secret\")", "def decrypt_token(token):\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[ENCRYPTION_ALGORITHM])\n return payload\n\n except JWTError:\n # eg: jose.exceptions.ExpiredSignatureError: Signature has expired.\n return None", "def get_user_token(self, obj):\n return obj.get_session_token()", "def get_auth_token():\n\tcollection = get_mongo_collection()\n\treturn collection.find_one({'key_type' : 'access_token'})['key']", "def _get_auth_tok(root, resp):\n tok = root.findtext('{%s}X-API-Session' % c.WEB_NS)\n if not tok:\n resp.reqbody = \"<sensitive>\"\n msg = _(\"Failed to parse a session token from the PowerVM \"\n \"response.\")\n LOG.error(msg + (_(' Body= %s'), resp.body))\n raise pvmex.Error(msg, response=resp)\n return tok", "def get_oauth_token():\n return session.get('remote_oauth')", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def _get_token(self):\n return user.get_token()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing to do a scrap of consumed material.
def test_manufacturing_scrap(self): # Update demo products (self.product_4 | self.product_2).write({ 'tracking': 'lot', }) # Update Bill Of Material to remove product with phantom bom. self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink() # Create Inventory Adjustment For Stick and Stone Tools with lot. lot_product_4 = self.env['stock.production.lot'].create({ 'name': '0000000000001', 'product_id': self.product_4.id, 'company_id': self.env.company.id, }) lot_product_2 = self.env['stock.production.lot'].create({ 'name': '0000000000002', 'product_id': self.product_2.id, 'company_id': self.env.company.id, }) stock_inv_product_4 = self.env['stock.inventory'].create({ 'name': 'Stock Inventory for Stick', 'product_ids': [(4, self.product_4.id)], 'line_ids': [ (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}), ]}) stock_inv_product_2 = self.env['stock.inventory'].create({ 'name': 'Stock Inventory for Stone Tools', 'product_ids': [(4, self.product_2.id)], 'line_ids': [ (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id}) ]}) (stock_inv_product_4 | stock_inv_product_2)._action_start() stock_inv_product_2.action_validate() stock_inv_product_4.action_validate() #Create Manufacturing order. production_form = Form(self.env['mrp.production']) production_form.product_id = self.product_6 production_form.bom_id = self.bom_3 production_form.product_qty = 12 production_form.product_uom_id = self.product_6.uom_id production_3 = production_form.save() production_3.action_confirm() production_3.action_assign() # Check Manufacturing order's availability. self.assertEqual(production_3.reservation_state, 'assigned', "Production order's availability should be Available.") location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id, # Scrap Product Wood without lot to check assert raise ?. scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id}) with self.assertRaises(UserError): scrap_id.do_scrap() # Scrap Product Wood with lot. self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id}) #Check scrap move is created for this production order. #TODO: should check with scrap objects link in between
[ "def test_extract_recipe_from_website(self):\n pass", "def test_get_items_peas_get(self):\n pass", "def test_JCB_VISUAL_MATERIALS( self ):\n driver = self.driver\n driver.get(self.base_url + \"/record=b5660654~S6\")\n driver.find_element_by_link_text(\"Request\").click()\n self.assertTrue( 'aeon' in driver.current_url )\n self.assertTrue( 'ReferenceNumber=b5660654' in driver.current_url )\n self.assertTrue( 'ItemTitle=Thomas%20Jefferson' in driver.current_url )\n self.assertTrue( 'ItemAuthor=&ItemPublisher' in driver.current_url )\n self.assertTrue( 'ItemPublisher=Princeton' in driver.current_url )\n self.assertTrue( 'CallNumber=VHS' in driver.current_url )\n # self.assertTrue( 'Notes=(bibnum%3A%20b5660654)' in driver.current_url )\n self.assertEqual( 'ItemInfo2=', driver.current_url[-10:] )", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_material_list(self):\n\t\tmlist = MaterialList(self.job)\n\t\t_get = '/j/%d/material/%d' % (self.job.number, mlist.hash)\n\t\tresponse = self.client.get(_get)\n\n\t\tself.assert_200(response)\n\t\tself.assert_template_used('material_list.html')\n\t\t# TODO: test errors when MaterialList doesn't exist\n\t\t# TODO: test errors when AwardedJob doesn't exist\n\t\t# TODO: test creating MaterialList w/ HTTP POST file stream\n\t\t# TODO: test creating MaterialList w/ HTTP POST variable 'itemCounter'", "def test_get_recipe_information(self):\n pass", "def test_Scraping(self):\n print(\"testing Scraping construct\")\n\n aScrape = Scraping(\"realdonaldtrump\", 2)\n self.assertEqual(aScrape.r.url, \"https://twitter.com/realdonaldtrump\")\n self.assertEqual(aScrape.r.status_code, 200)", "def test_search_charge_item(self):\n pass", "def test_manufacturer_part_search(self):\n url = reverse('api-manufacturer-part-list')\n data = {'search': 'MPN'}\n response = self.get(url, data)\n self.assertEqual(len(response.data), 3)", "def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])", "def test_cards_get(self):\n pass", "def test_get_art_info(self):\n pass", "def test_quote(self):\n\t\tmlist = MaterialList(self.job)\n\t\tpost = {'materialListHash': mlist.hash,\n\t\t\t\t'quotePrice': 0.0,\n\t\t\t\t'vendor': 'test vendor',\n\t\t\t\t'quote': None}\n\n\t\t# TODO: test with and without file stream\n\n\t\turls = ('/j/%d/material/quote' % self.job.number,\n\t\t\t\t'/j/%d/material/%d/quote' % (self.job.number, mlist.hash))\n\t\tfor u in urls:\n\t\t\tresponse = self.client.post(u, data=post)\n\t\t\t_location = '/j/%d/material/%d' % (self.job.number, mlist.hash)\t\t# target url to verify\n\n\t\t\tself.assertRedirects(response, _location)", "def test_gethardwares_item(self):\n pass", "def test_get_analyzed_recipe_instructions(self):\n pass", "def run_read_test(self):\n\n\t\t#Tell the browser to only wait 3 additional seconds so can tell faster when finished reading\n\t\tself._browser.implicitly_wait(3.0)\n\n\t\t#Read one page at a time of current book\n\t\twhile(True):\n\t\t\ttry:\n\t\t\t\treadAgainElements=self._browser.find_elements_by_xpath(\"//li[contains(@data-speech, 'again')]\")\n\n\t\t\t\tif(len(readAgainElements)>0):\n\t\t\t\t\tprint \"finished reading book\"\n\t\t\t\t\tbreak\n\n\t\t\t\tnextPageLink=self._browser.find_element_by_link_text(\"Next\")\n\t\t\t\tnextPageLink.click()\n\n\t\t\texcept NoSuchElementException:\n\t\t\t\tprint \"error reading book\"\n\t\t\t\tbreak\n\n\t\t#Close browser\n\t\ttime.sleep(3.0)\n\t\tself._browser.close()", "def test_direct_usage(self):\n r = RscHtmlReader()\n fname = '10.1039_C6OB02074G.html'\n f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'rsc', fname), 'rb')\n content = f.read()\n d = r.readstring(content)\n self.assertEqual(len(d.elements), 60)", "def test_display_recipe(self):\n\n result = self.display_recipe(1, 1)\n self.assertIn('Hummus', result.data)", "def test_search_catering_item(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test checks a tracked manufactured product will go to location defined in putaway strategy when the production is recorded with product.produce wizard.
def test_putaway_after_manufacturing_3(self): self.laptop.tracking = 'serial' mo_laptop = self.new_mo_laptop() serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id}) mo_form = Form(mo_laptop) mo_form.qty_producing = 1 mo_form.lot_producing_id = serial mo_laptop = mo_form.save() mo_laptop.button_mark_done() # We check if the laptop go in the depot and not in the stock move = mo_laptop.move_finished_ids location_dest = move.move_line_ids.location_dest_id self.assertEqual(location_dest.id, self.depot_location.id) self.assertNotEqual(location_dest.id, self.stock_location.id)
[ "def test_generate_with_putaway(self):\n nbre_of_lines = 4\n shelf_location = self.env['stock.location'].create({\n 'name': 'shelf1',\n 'usage': 'internal',\n 'location_id': self.location_dest.id,\n })\n\n # Checks a first time without putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be the default one.\n self.assertEqual(move_line.location_dest_id.id, self.location_dest.id)\n\n # We need to activate multi-locations to use putaway rules.\n grp_multi_loc = self.env.ref('stock.group_stock_multi_locations')\n self.env.user.write({'groups_id': [(4, grp_multi_loc.id)]})\n # Creates a putaway rule\n putaway_product = self.env['stock.putaway.rule'].create({\n 'product_id': self.product_serial.id,\n 'location_in_id': self.location_dest.id,\n 'location_out_id': shelf_location.id,\n })\n\n # Checks now with putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be now the one from the putaway.\n self.assertEqual(move_line.location_dest_id.id, shelf_location.id)", "def test_flow_4(self):\n # Tick \"manufacture\" and MTO on self.comp2\n mto_route = self.env.ref('stock.route_warehouse0_mto')\n mto_route.active = True\n manufacture_route = self.env['stock.location.route'].search([('name', '=', 'Manufacture')])\n self.comp2.write({'route_ids': [(4, manufacture_route.id, None)]})\n self.comp2.write({'route_ids': [(4, mto_route.id, None)]})\n\n orderpoint_form = Form(self.env['stock.warehouse.orderpoint'])\n orderpoint_form.product_id = self.comp2\n orderpoint_form.product_min_qty = 0.0\n orderpoint_form.product_max_qty = 10.0\n orderpoint_form.location_id = self.env.company.subcontracting_location_id\n orderpoint = orderpoint_form.save()\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n warehouse = picking_receipt.picking_type_id.warehouse_id\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(mo.state, 'confirmed')\n\n picking_delivery = mo.picking_ids\n self.assertFalse(picking_delivery)\n\n picking_delivery = self.env['stock.picking'].search([('origin', 'ilike', '%' + picking_receipt.name + '%')])\n self.assertFalse(picking_delivery)\n\n move = self.env['stock.move'].search([\n ('product_id', '=', self.comp2.id),\n ('location_id', '=', warehouse.lot_stock_id.id),\n ('location_dest_id', '=', self.env.company.subcontracting_location_id.id)\n ])\n self.assertTrue(move)\n picking_delivery = move.picking_id\n self.assertTrue(picking_delivery)\n self.assertEqual(move.product_uom_qty, 11.0)\n\n # As well as a manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 1)", "def test_manufacturing_scrap(self):\n\n # Update demo products\n (self.product_4 | self.product_2).write({\n 'tracking': 'lot',\n })\n\n # Update Bill Of Material to remove product with phantom bom.\n self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()\n\n # Create Inventory Adjustment For Stick and Stone Tools with lot.\n lot_product_4 = self.env['stock.production.lot'].create({\n 'name': '0000000000001',\n 'product_id': self.product_4.id,\n 'company_id': self.env.company.id,\n })\n lot_product_2 = self.env['stock.production.lot'].create({\n 'name': '0000000000002',\n 'product_id': self.product_2.id,\n 'company_id': self.env.company.id,\n })\n\n stock_inv_product_4 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stick',\n 'product_ids': [(4, self.product_4.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}),\n ]})\n\n stock_inv_product_2 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stone Tools',\n 'product_ids': [(4, self.product_2.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id})\n ]})\n (stock_inv_product_4 | stock_inv_product_2)._action_start()\n stock_inv_product_2.action_validate()\n stock_inv_product_4.action_validate()\n\n #Create Manufacturing order.\n production_form = Form(self.env['mrp.production'])\n production_form.product_id = self.product_6\n production_form.bom_id = self.bom_3\n production_form.product_qty = 12\n production_form.product_uom_id = self.product_6.uom_id\n production_3 = production_form.save()\n production_3.action_confirm()\n production_3.action_assign()\n\n # Check Manufacturing order's availability.\n self.assertEqual(production_3.reservation_state, 'assigned', \"Production order's availability should be Available.\")\n\n location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,\n\n # Scrap Product Wood without lot to check assert raise ?.\n scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})\n with self.assertRaises(UserError):\n scrap_id.do_scrap()\n\n # Scrap Product Wood with lot.\n self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})\n\n #Check scrap move is created for this production order.\n #TODO: should check with scrap objects link in between", "def test_flow_3(self):\n # Tick \"resupply subconractor on order\"\n resupply_sub_on_order_route = self.env['stock.location.route'].search([('name', '=', 'Resupply Subcontractor on Order')])\n (self.comp1 + self.comp2).write({'route_ids': [(4, resupply_sub_on_order_route.id, None)]})\n\n # Tick \"manufacture\" and MTO on self.comp2\n mto_route = self.env.ref('stock.route_warehouse0_mto')\n mto_route.active = True\n manufacture_route = self.env['stock.location.route'].search([('name', '=', 'Manufacture')])\n self.comp2.write({'route_ids': [(4, manufacture_route.id, None)]})\n self.comp2.write({'route_ids': [(4, mto_route.id, None)]})\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # Nothing should be tracked\n self.assertFalse(picking_receipt.display_action_record_components)\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(mo.state, 'confirmed')\n\n picking_delivery = mo.picking_ids\n self.assertEqual(len(picking_delivery), 1)\n self.assertEqual(len(picking_delivery.move_lines), 2)\n self.assertEqual(picking_delivery.origin, picking_receipt.name)\n self.assertEqual(picking_delivery.partner_id, picking_receipt.partner_id)\n\n # The picking should be a delivery order\n wh = picking_receipt.picking_type_id.warehouse_id\n self.assertEqual(mo.picking_ids.picking_type_id, wh.out_type_id)\n\n self.assertEqual(mo.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mo.picking_type_id.active)\n\n # As well as a manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 1)\n picking_receipt.move_lines.quantity_done = 1\n picking_receipt.button_validate()\n self.assertEqual(mo.state, 'done')\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -1)\n self.assertEqual(avail_qty_comp2, -1)\n self.assertEqual(avail_qty_finished, 1)", "def test_update_room_location(self):\n pass", "def test_single_quant_non_default_locations(self):\n pick = self.quant_1.create_picking(\n self.picking_type_pick,\n location_id=self.test_stock_location_01.id,\n location_dest_id=self.test_goodsout_location_02.id,\n )\n # Confirm default location used if non specified\n self.assertEqual(pick.location_id, self.test_stock_location_01)\n self.assertNotEqual(pick.location_id, self.picking_type_pick.default_location_src_id)\n # Confirm default dest location used if non specified\n self.assertEqual(pick.location_dest_id, self.test_goodsout_location_02)\n self.assertNotEqual(pick.location_id, self.picking_type_pick.default_location_dest_id)", "def test_flow_2(self):\n # Tick \"resupply subconractor on order\"\n resupply_sub_on_order_route = self.env['stock.location.route'].search([('name', '=', 'Resupply Subcontractor on Order')])\n (self.comp1 + self.comp2).write({'route_ids': [(4, resupply_sub_on_order_route.id, None)]})\n # Create a different subcontract location\n partner_subcontract_location = self.env['stock.location'].create({\n 'name': 'Specific partner location',\n 'location_id': self.env.ref('stock.stock_location_locations_partner').id,\n 'usage': 'internal',\n 'company_id': self.env.company.id,\n })\n self.subcontractor_partner1.property_stock_subcontractor = partner_subcontract_location.id\n resupply_rule = resupply_sub_on_order_route.rule_ids.filtered(lambda l:\n l.location_id == self.comp1.property_stock_production and\n l.location_src_id == self.env.company.subcontracting_location_id)\n resupply_rule.copy({'location_src_id': partner_subcontract_location.id})\n resupply_warehouse_rule = self.warehouse.route_ids.rule_ids.filtered(lambda l:\n l.location_id == self.env.company.subcontracting_location_id and\n l.location_src_id == self.warehouse.lot_stock_id)\n resupply_warehouse_rule.copy({'location_id': partner_subcontract_location.id})\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # Nothing should be tracked\n self.assertFalse(picking_receipt.display_action_record_components)\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(len(mo.picking_ids), 1)\n self.assertEqual(mo.state, 'confirmed')\n self.assertEqual(len(mo.picking_ids.move_lines), 2)\n\n picking = mo.picking_ids\n wh = picking.picking_type_id.warehouse_id\n\n # The picking should be a delivery order\n self.assertEqual(picking.picking_type_id, wh.out_type_id)\n\n self.assertEqual(mo.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mo.picking_type_id.active)\n\n # No manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 0)\n\n picking_receipt.move_lines.quantity_done = 1\n picking_receipt.button_validate()\n self.assertEqual(mo.state, 'done')\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -1)\n self.assertEqual(avail_qty_comp2, -1)\n self.assertEqual(avail_qty_finished, 1)\n\n avail_qty_comp1_in_global_location = self.env['stock.quant']._get_available_quantity(self.comp1, self.env.company.subcontracting_location_id, allow_negative=True)\n avail_qty_comp2_in_global_location = self.env['stock.quant']._get_available_quantity(self.comp2, self.env.company.subcontracting_location_id, allow_negative=True)\n self.assertEqual(avail_qty_comp1_in_global_location, 0.0)\n self.assertEqual(avail_qty_comp2_in_global_location, 0.0)", "def test_flow_tracked_only_finished(self):\n self.finished_product.tracking = \"serial\"\n self.comp1_sn.tracking = \"none\"\n nb_finished_product = 3\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished_product\n move.product_uom_qty = nb_finished_product\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # We shouldn't be able to call the 'record_components' button\n self.assertFalse(picking_receipt.display_action_record_components)\n\n wh = picking_receipt.picking_type_id.warehouse_id\n lot_names_finished = [f\"subtracked_{i}\" for i in range(nb_finished_product)]\n\n move_details = Form(picking_receipt.move_lines, view='stock.view_stock_move_nosuggest_operations')\n for lot_name in lot_names_finished:\n with move_details.move_line_nosuggest_ids.new() as ml:\n ml.qty_done = 1\n ml.lot_name = lot_name\n move_details.save()\n\n picking_receipt.button_validate()\n # Check the created manufacturing order\n # Should have one mo by serial number\n mos = picking_receipt.move_lines.move_orig_ids.production_id\n self.assertEqual(len(mos), nb_finished_product)\n self.assertEqual(mos.mapped(\"state\"), [\"done\"] * nb_finished_product)\n self.assertEqual(mos.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mos.picking_type_id.active)\n self.assertEqual(set(mos.lot_producing_id.mapped(\"name\")), set(lot_names_finished))\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1_sn, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished_product, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -nb_finished_product)\n self.assertEqual(avail_qty_comp2, -nb_finished_product)\n self.assertEqual(avail_qty_finished, nb_finished_product)", "def test_update_function_room_location(self):\n pass", "def test_update_goal(self):\n pass", "def test_ordering_product_on_swaglabs_and_getting_confirmation(browser):", "def test_arrival_page(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Existing product\n rv = self.app.post('/user', data=dict(barcode=self.TESTPRODUCT1['barcode']), follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data\n\n # Nonexistent product (albeit for incorrect requests)\n rv = self.app.post('/product', data=dict(barcode='nothing'), follow_redirects=True)\n assert b'Produkten eller användaren existerar inte!' in rv.data", "def test_modify_coupon_biz_location(self):\n email = 'user114@company.com' # must match email of advertiser.id: 114\n postal_code = '12601'\n advertiser = Advertiser.objects.get(email=email)\n self.assertTrue(advertiser is not None)\n business = advertiser.businesses.all()[0]\n location = Location(business=business, \n location_address1='address1', location_address2='address2',\n location_city='primary_address_city',\n location_state_province='NY',\n location_zip_postal=postal_code,\n location_url='')\n location.save()\n phone = '201-444-1223'\n website = 'http://10testing455.com'\n # soap modify time is more recent than location create date time\n now = datetime.datetime.now() + datetime.timedelta(days=1)\n datetime_format = '%Y-%m-%d %H:%M:%S'\n now_format = now.strftime(datetime_format)\n account_dict = {'name': business.business_name, \n 'phone_office': phone, 'email1': email, \n 'business_id_c': 114, 'website': website,\n 'date_modified': now_format, 'modify': True}\n contact_dict = {'primary_address_postalcode': postal_code, \n 'last_name': 'LastName', 'primary_address_city': 'city', \n 'first_name': 'FirstName', 'primary_address_state': 'st', \n 'primary_address_street': '13 addr1 st\\nPO BOX addr2\\naddr3', \n 'advertiser_id_c': 114, 'email1': email, \n 'date_modified': now_format, 'phone_work': phone, 'modify': True}\n sync_coupon_business(self.sugar, account_dict, contact_dict, \n modify_mode=True)\n # test for location modification\n for location in business.locations.all():\n if (location.location_area_code + '-' + \n location.location_exchange + '-' +\n location.location_number) == phone:\n break\n if location:\n self.assertTrue(location.location_number in phone)\n self.assertEquals(location.location_zip_postal, postal_code)\n self.assertEquals(location.location_url, website)\n self.assertEquals(location.location_address1 + \n location.location_address2 + location.location_city + \n location.location_state_province, \n '13 addr1 stPO BOX addr2 addr3cityst')\n else:\n self.fail('No location.')", "def test_products_location_query_param(self):\n\n # Create initial product\n self.test_create_product()\n\n # Attempt to get our product based on the location\n url = \"/products?location=Pittsburgh\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.get(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(json_response), 1)\n self.assertEqual(json_response[0][\"name\"], \"Kite\")\n self.assertEqual(json_response[0][\"price\"], 14.99)\n self.assertEqual(json_response[0][\"quantity\"], 60)\n self.assertEqual(json_response[0][\"description\"], \"It flies high\")\n self.assertEqual(json_response[0][\"location\"], \"Pittsburgh\")\n\n # Attempt to get a product by location that doesn't exist\n url = \"/products?location=Philadelphia\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.get(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(json_response), 0)", "def test_update_room_location_area(self):\n pass", "def test_update_putaway(self):\n pass", "def test_update_room_location_classification(self):\n pass", "def test_search_room_location(self):\n pass", "def test_update_event_meal_plan(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure a kit is split in the corrects quantity_done by components in case of an immediate transfer.
def test_kit_immediate_transfer(self): picking = self.env['stock.picking'].create({ 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, 'partner_id': self.test_partner.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'immediate_transfer': True }) move_receipt_1 = self.env['stock.move'].create({ 'name': self.kit_parent.name, 'product_id': self.kit_parent.id, 'quantity_done': 3, 'product_uom': self.kit_parent.uom_id.id, 'picking_id': picking.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, }) picking.button_validate() # We check that the picking has the correct quantities after its move were splitted. self.assertEqual(len(picking.move_lines), 7) for move_line in picking.move_lines: self.assertEqual(move_line.quantity_done, self.expected_quantities[move_line.product_id])
[ "def test_flow_tracked_only_finished(self):\n self.finished_product.tracking = \"serial\"\n self.comp1_sn.tracking = \"none\"\n nb_finished_product = 3\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished_product\n move.product_uom_qty = nb_finished_product\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # We shouldn't be able to call the 'record_components' button\n self.assertFalse(picking_receipt.display_action_record_components)\n\n wh = picking_receipt.picking_type_id.warehouse_id\n lot_names_finished = [f\"subtracked_{i}\" for i in range(nb_finished_product)]\n\n move_details = Form(picking_receipt.move_lines, view='stock.view_stock_move_nosuggest_operations')\n for lot_name in lot_names_finished:\n with move_details.move_line_nosuggest_ids.new() as ml:\n ml.qty_done = 1\n ml.lot_name = lot_name\n move_details.save()\n\n picking_receipt.button_validate()\n # Check the created manufacturing order\n # Should have one mo by serial number\n mos = picking_receipt.move_lines.move_orig_ids.production_id\n self.assertEqual(len(mos), nb_finished_product)\n self.assertEqual(mos.mapped(\"state\"), [\"done\"] * nb_finished_product)\n self.assertEqual(mos.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mos.picking_type_id.active)\n self.assertEqual(set(mos.lot_producing_id.mapped(\"name\")), set(lot_names_finished))\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1_sn, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished_product, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -nb_finished_product)\n self.assertEqual(avail_qty_comp2, -nb_finished_product)\n self.assertEqual(avail_qty_finished, nb_finished_product)", "def test_kit_planned_transfer(self):\n picking = self.env['stock.picking'].create({\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n 'partner_id': self.test_partner.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'immediate_transfer': False,\n })\n move_receipt_1 = self.env['stock.move'].create({\n 'name': self.kit_parent.name,\n 'product_id': self.kit_parent.id,\n 'product_uom_qty': 3,\n 'product_uom': self.kit_parent.uom_id.id,\n 'picking_id': picking.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n })\n picking.action_confirm()\n\n # We check that the picking has the correct quantities after its move were splitted.\n self.assertEqual(len(picking.move_lines), 7)\n for move_line in picking.move_lines:\n self.assertEqual(move_line.product_qty, self.expected_quantities[move_line.product_id])", "def _check_overprocessed_subcontract_qty(self):\n overprocessed_moves = self.env['stock.move']\n for move in self:\n if not move.is_subcontract:\n continue\n # Extra quantity is allowed when components do not need to be register\n if not move._has_tracked_subcontract_components():\n continue\n rounding = move.product_uom.rounding\n if float_compare(move.quantity_done, move.move_orig_ids.production_id.qty_produced, precision_rounding=rounding) > 0:\n overprocessed_moves |= move\n if overprocessed_moves:\n raise UserError(_(\"\"\"\nYou have to use 'Records Components' button in order to register quantity for a\nsubcontracted product(s) with tracked component(s):\n %s.\nIf you want to process more than initially planned, you\ncan use the edit + unlock buttons in order to adapt the initial demand on the\noperations.\"\"\") % ('\\n'.join(overprocessed_moves.mapped('product_id.display_name'))))", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def _action_done(self):\n\n # First, we loop over all the move lines to do a preliminary check: `qty_done` should not\n # be negative and, according to the presence of a picking type or a linked inventory\n # adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlink\n # the line. It is mandatory in order to free the reservation and correctly apply\n # `action_done` on the next move lines.\n ml_to_delete = self.env['stock.move.line']\n for ml in self:\n # Check here if `ml.qty_done` respects the rounding of `ml.product_uom_id`.\n uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP')\n precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP')\n if float_compare(uom_qty, qty_done, precision_digits=precision_digits) != 0:\n raise UserError(_('The quantity done for the product \"%s\" doesn\\'t respect the rounding precision \\\n defined on the unit of measure \"%s\". Please change the quantity done or the \\\n rounding precision of your unit of measure.') % (\n ml.product_id.display_name, ml.product_uom_id.name))\n\n qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding)\n if qty_done_float_compared > 0:\n if ml.product_id.tracking != 'none':\n picking_type_id = ml.move_id.picking_type_id\n if picking_type_id:\n if picking_type_id.use_create_lots:\n # If a picking type is linked, we may have to create a production lot on\n # the fly before assigning it to the move line if the user checked both\n # `use_create_lots` and `use_existing_lots`.\n if ml.lot_name and not ml.lot_id:\n tmpl_id = ml.product_id.product_tmpl_id\n product_template = self.env['product.template'].search([('id', '=', int(tmpl_id))])\n params = self.env['ir.config_parameter'].sudo()\n production_lot_alert_days = int(\n params.get_param('inventory_extension.production_lot_alert_days'))\n if ml.lot_expired_date and not ml.lot_expired_date is None:\n final_date = fields.Datetime.from_string(ml.lot_expired_date)\n if production_lot_alert_days > 0:\n alert_date = final_date.date() - datetime.timedelta(days=production_lot_alert_days)\n else:\n alert_date = final_date.date() - datetime.timedelta(days=3)\n lot = self.env['stock.production.lot'].create(\n {'name': ml.lot_name, 'use_date': ml.lot_expired_date,\n 'removal_date': ml.lot_expired_date, 'life_date': ml.lot_expired_date,\n 'alert_date': str(alert_date), 'product_id': ml.product_id.id})\n else:\n lot = self.env['stock.production.lot'].create(\n {'name': ml.lot_name,'product_id': ml.product_id.id})\n\n ml.write({'lot_id': lot.id})\n elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots:\n # If the user disabled both `use_create_lots` and `use_existing_lots`\n # checkboxes on the picking type, he's allowed to enter tracked\n # products without a `lot_id`.\n continue\n elif ml.move_id.inventory_id:\n # If an inventory adjustment is linked, the user is allowed to enter\n # tracked products without a `lot_id`.\n continue\n\n if not ml.lot_id:\n raise UserError(_('You need to supply a lot/serial number for %s.') % ml.product_id.name)\n elif qty_done_float_compared < 0:\n raise UserError(_('No negative quantities allowed'))\n else:\n ml_to_delete |= ml\n ml_to_delete.unlink()\n\n # Now, we can actually move the quant.\n done_ml = self.env['stock.move.line']\n for ml in self - ml_to_delete:\n if ml.product_id.type == 'product':\n Quant = self.env['stock.quant']\n rounding = ml.product_uom_id.rounding\n\n # if this move line is force assigned, unreserve elsewhere if needed\n if not ml.location_id.should_bypass_reservation() and float_compare(ml.qty_done, ml.product_qty,\n precision_rounding=rounding) > 0:\n extra_qty = ml.qty_done - ml.product_qty\n ml._free_reservation(ml.product_id, ml.location_id, extra_qty, lot_id=ml.lot_id,\n package_id=ml.package_id, owner_id=ml.owner_id, ml_to_ignore=done_ml)\n # unreserve what's been reserved\n if not ml.location_id.should_bypass_reservation() and ml.product_id.type == 'product' and ml.product_qty:\n try:\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty,\n lot_id=ml.lot_id, package_id=ml.package_id,\n owner_id=ml.owner_id, strict=True)\n except UserError:\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=False,\n package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\n\n # move what's been actually done\n quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id,\n rounding_method='HALF-UP')\n available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_id, -quantity,\n lot_id=ml.lot_id, package_id=ml.package_id,\n owner_id=ml.owner_id)\n if available_qty < 0 and ml.lot_id:\n # see if we can compensate the negative quants with some untracked quants\n untracked_qty = Quant._get_available_quantity(ml.product_id, ml.location_id, lot_id=False,\n package_id=ml.package_id, owner_id=ml.owner_id,\n strict=True)\n if untracked_qty:\n taken_from_untracked_qty = min(untracked_qty, abs(quantity))\n Quant._update_available_quantity(ml.product_id, ml.location_id, -taken_from_untracked_qty,\n lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id)\n Quant._update_available_quantity(ml.product_id, ml.location_id, taken_from_untracked_qty,\n lot_id=ml.lot_id, package_id=ml.package_id,\n owner_id=ml.owner_id)\n Quant._update_available_quantity(ml.product_id, ml.location_dest_id, quantity, lot_id=ml.lot_id,\n package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date)\n done_ml |= ml\n # Reset the reserved quantity as we just moved it to the destination location.\n (self - ml_to_delete).with_context(bypass_reservation_update=True).write(\n {'product_uom_qty': 0.00, 'date': fields.Datetime.now(), })", "def _action_done(self):\r\n\r\n # First, we loop over all the move lines to do a preliminary check: `qty_done` should not\r\n # be negative and, according to the presence of a picking type or a linked inventory\r\n # adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlink\r\n # the line. It is mandatory in order to free the reservation and correctly apply\r\n # `action_done` on the next move lines.\r\n ml_to_delete = self.env['stock.move.line']\r\n for ml in self:\r\n # Check here if `ml.qty_done` respects the rounding of `ml.product_uom_id`.\r\n uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP')\r\n precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')\r\n qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP')\r\n if float_compare(uom_qty, qty_done, precision_digits=precision_digits) != 0:\r\n raise UserError(_('The quantity done for the product \"%s\" doesn\\'t respect the rounding precision \\\r\n defined on the unit of measure \"%s\". Please change the quantity done or the \\\r\n rounding precision of your unit of measure.') % (ml.product_id.display_name, ml.product_uom_id.name))\r\n\r\n qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding)\r\n if qty_done_float_compared > 0:\r\n if ml.product_id.tracking != 'none':\r\n picking_type_id = ml.move_id.picking_type_id\r\n if picking_type_id:\r\n if picking_type_id.use_create_lots:\r\n # If a picking type is linked, we may have to create a production lot on\r\n # the fly before assigning it to the move line if the user checked both\r\n # `use_create_lots` and `use_existing_lots`.\r\n if ml.lot_name and ml.date_reference and not ml.lot_id:\r\n lot = self.env['stock.production.lot'].create(\r\n {'name': ml.lot_name, 'product_id': ml.product_id.id, 'date_refer': ml.date_reference}\r\n )\r\n ml.write({'lot_id': lot.id})\r\n data_dates = ml.lot_id._get_dattes(ml.product_id.id,ml.date_reference)\r\n for field, value in data_dates.items():\r\n setattr(ml.lot_id, field, value)\r\n elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots:\r\n # If the user disabled both `use_create_lots` and `use_existing_lots`\r\n # checkboxes on the picking type, he's allowed to enter tracked\r\n # products without a `lot_id`.\r\n continue\r\n elif ml.move_id.inventory_id:\r\n # If an inventory adjustment is linked, the user is allowed to enter\r\n # tracked products without a `lot_id`.\r\n continue\r\n\r\n if not ml.lot_id:\r\n raise UserError(_('You need to supply a lot/serial number for %s.') % ml.product_id.name)\r\n elif qty_done_float_compared < 0:\r\n raise UserError(_('No negative quantities allowed'))\r\n else:\r\n ml_to_delete |= ml\r\n ml_to_delete.unlink()\r\n\r\n # Now, we can actually move the quant.\r\n done_ml = self.env['stock.move.line']\r\n for ml in self - ml_to_delete:\r\n if ml.product_id.type == 'product':\r\n Quant = self.env['stock.quant']\r\n rounding = ml.product_uom_id.rounding\r\n\r\n # if this move line is force assigned, unreserve elsewhere if needed\r\n if not ml.location_id.should_bypass_reservation() and float_compare(ml.qty_done, ml.product_qty, precision_rounding=rounding) > 0:\r\n extra_qty = ml.qty_done - ml.product_qty\r\n ml._free_reservation(ml.product_id, ml.location_id, extra_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, ml_to_ignore=done_ml)\r\n # unreserve what's been reserved\r\n if not ml.location_id.should_bypass_reservation() and ml.product_id.type == 'product' and ml.product_qty:\r\n try:\r\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n except UserError:\r\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n\r\n # move what's been actually done\r\n quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id, rounding_method='HALF-UP')\r\n available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_id, -quantity, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\r\n if available_qty < 0 and ml.lot_id:\r\n # see if we can compensate the negative quants with some untracked quants\r\n untracked_qty = Quant._get_available_quantity(ml.product_id, ml.location_id, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n if untracked_qty:\r\n taken_from_untracked_qty = min(untracked_qty, abs(quantity))\r\n Quant._update_available_quantity(ml.product_id, ml.location_id, -taken_from_untracked_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id)\r\n Quant._update_available_quantity(ml.product_id, ml.location_id, taken_from_untracked_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\r\n Quant._update_available_quantity(ml.product_id, ml.location_dest_id, quantity, lot_id=ml.lot_id, package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date)\r\n done_ml |= ml\r\n # Reset the reserved quantity as we just moved it to the destination location.\r\n (self - ml_to_delete).with_context(bypass_reservation_update=True).write({\r\n 'product_uom_qty': 0.00,\r\n 'date': fields.Datetime.now(),\r\n })", "def test_putaway_after_manufacturing_3(self):\n self.laptop.tracking = 'serial'\n mo_laptop = self.new_mo_laptop()\n serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id})\n\n mo_form = Form(mo_laptop)\n mo_form.qty_producing = 1\n mo_form.lot_producing_id = serial\n mo_laptop = mo_form.save()\n mo_laptop.button_mark_done()\n\n # We check if the laptop go in the depot and not in the stock\n move = mo_laptop.move_finished_ids\n location_dest = move.move_line_ids.location_dest_id\n self.assertEqual(location_dest.id, self.depot_location.id)\n self.assertNotEqual(location_dest.id, self.stock_location.id)", "def _compute_qty(self):\n uom_obj = self.env['product.uom']\n move_obj = self.env['stock.move']\n pol_obj = self.env['purchase.order.line']\n pol_rm_rcs = pol_obj.search([('wo_rm_subcontracting_id', '=', self.wo_id.id), \n ('product_id', '=', self.product_id.id), \n ('purchase_state', 'in', ('draft', 'waiting'))])\n \n move_rm_rcs = move_obj.search([('wo_rm_subcontracting_id', '=', self.wo_id.id), \n ('product_id', '=', self.product_id.id), \n ('state', '!=', 'cancel'),\n '|', ('type_subcontracting', '!=', 'indirect'), \n '&', ('type_subcontracting', '=', 'indirect'), ('type', '=', 'out')])\n ordered_qty = 0\n taken_fromstock_qty = 0\n out_qty = 0\n soon_out_qty = 0\n # Calcule de la qté commandée, prise sur le stock et envoyé\n for pol_rm in pol_rm_rcs:\n qty = uom_obj._compute_qty_obj(pol_rm.uom_id, pol_rm.uom_qty, self.rm_fp_draft_id.uom_id, with_raise=True, with_round=False)\n soon_out_qty += qty\n if pol_rm.type_subcontracting == 'stock':\n taken_fromstock_qty += qty\n elif pol_rm.type_subcontracting == 'indirect':\n ordered_qty += qty\n else:\n ordered_qty += qty\n \n \n for move_rm in move_rm_rcs:\n qty = uom_obj._compute_qty_obj(move_rm.uom_id, move_rm.uom_qty, self.rm_fp_draft_id.uom_id, with_raise=True, with_round=False)\n out_qty += qty\n if move_rm.type_subcontracting == 'stock':\n taken_fromstock_qty += qty\n elif move_rm.type_subcontracting == 'indirect':\n ordered_qty += qty\n else:\n ordered_qty += qty \n \n # Si c'est le dernier OT de l'OF, déclaration du produit final\n# TODO THOMAS: commenté car non utilisé. À supprimer ulterieurement\n# last_wo = False\n# if not self.wo_id.next_wo_ids:\n# last_wo = True\n# else:\n# last_wo = True\n# for next_wo in self.wo_id.next_wo_ids:\n# if next_wo.mo_id.id == self.wo_id.mo_id.id:\n# last_wo = False\n# break\n# \n# if last_wo:\n# product_fp_id = self.wo_id.final_product_id.id\n# else:\n# product_fp_id = False\n# for fp in self.wo_id.fp_draft_ids:\n# if fp.product_id.is_int:\n# product_fp_id = fp.product_id.id\n \n # Calcule de la qté déjà utilisée et la déduction des qtés déjà achetées mais non utilisées\n pol_fp_rcs = pol_obj.search([('wo_fp_subcontracting_id', '=', self.wo_id.id)])\n qty_product_final_use = 0\n po_ids = []\n for pol_fp in pol_fp_rcs:\n if pol_fp.purchase_order_id.id not in po_ids and pol_fp.purchase_state != 'cancel':\n po_ids.append(pol_fp.purchase_order_id.id)\n qty_product_final_use += pol_fp.wo_qty\n \n wo_in_use_qty = self.rm_fp_draft_id.efficient_unit_qty * qty_product_final_use\n # Initialisation des champs fonctions\n self.virtual_stock = self.product_id.virtual_stock\n self.wo_total_ordered_qty = ordered_qty\n self.taken_fromstock_qty = taken_fromstock_qty\n self.wo_total_remaining_qty = (self.rm_fp_draft_id.efficient_unit_qty*self.wo_id.quantity) - ordered_qty - taken_fromstock_qty\n if self.rm_fp_draft_id.type_qty == 'variable':\n need_qty = self.rm_fp_draft_id.efficient_unit_qty * self.wiz_qty or self.wiz_qty \n else:\n need_qty = self.rm_fp_draft_id.uom_qty\n \n self.need_qty = need_qty\n self.uom_id = self.rm_fp_draft_id and self.rm_fp_draft_id.uom_id.id or self.product_id.uom_id.id \n self.wo_delivred_qty = out_qty\n self.wo_soon_delivred_qty = soon_out_qty\n self.forecast_delivred_qty = self.wo_delivred_qty + self.wo_soon_delivred_qty\n self.wo_in_use_qty = wo_in_use_qty\n if self.need_qty < ((self.wo_delivred_qty+self.wo_soon_delivred_qty)-self.wo_in_use_qty):\n self.wo_remaining_need_qty = 0\n else:\n self.wo_remaining_need_qty = self.need_qty - ((self.wo_delivred_qty+self.wo_soon_delivred_qty) - self.wo_in_use_qty)\n \n # Calcule des quantités libres dans l'emplacement de st\n warehouse_rs = self.wo_id.first_resource_id and self.wo_id.first_resource_id.location_id.warehouse_id or False\n if warehouse_rs:\n self.release_virtual_qty, self.release_qty = self.product_id.get_available_stock(return_real_stock=True, warehouse_id=warehouse_rs.id)\n else:\n self.release_qty = 0.0\n self.release_virtual_qty = 0.0", "def test_flow_4(self):\n # Tick \"manufacture\" and MTO on self.comp2\n mto_route = self.env.ref('stock.route_warehouse0_mto')\n mto_route.active = True\n manufacture_route = self.env['stock.location.route'].search([('name', '=', 'Manufacture')])\n self.comp2.write({'route_ids': [(4, manufacture_route.id, None)]})\n self.comp2.write({'route_ids': [(4, mto_route.id, None)]})\n\n orderpoint_form = Form(self.env['stock.warehouse.orderpoint'])\n orderpoint_form.product_id = self.comp2\n orderpoint_form.product_min_qty = 0.0\n orderpoint_form.product_max_qty = 10.0\n orderpoint_form.location_id = self.env.company.subcontracting_location_id\n orderpoint = orderpoint_form.save()\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n warehouse = picking_receipt.picking_type_id.warehouse_id\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(mo.state, 'confirmed')\n\n picking_delivery = mo.picking_ids\n self.assertFalse(picking_delivery)\n\n picking_delivery = self.env['stock.picking'].search([('origin', 'ilike', '%' + picking_receipt.name + '%')])\n self.assertFalse(picking_delivery)\n\n move = self.env['stock.move'].search([\n ('product_id', '=', self.comp2.id),\n ('location_id', '=', warehouse.lot_stock_id.id),\n ('location_dest_id', '=', self.env.company.subcontracting_location_id.id)\n ])\n self.assertTrue(move)\n picking_delivery = move.picking_id\n self.assertTrue(picking_delivery)\n self.assertEqual(move.product_uom_qty, 11.0)\n\n # As well as a manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 1)", "def test_flow_3(self):\n # Tick \"resupply subconractor on order\"\n resupply_sub_on_order_route = self.env['stock.location.route'].search([('name', '=', 'Resupply Subcontractor on Order')])\n (self.comp1 + self.comp2).write({'route_ids': [(4, resupply_sub_on_order_route.id, None)]})\n\n # Tick \"manufacture\" and MTO on self.comp2\n mto_route = self.env.ref('stock.route_warehouse0_mto')\n mto_route.active = True\n manufacture_route = self.env['stock.location.route'].search([('name', '=', 'Manufacture')])\n self.comp2.write({'route_ids': [(4, manufacture_route.id, None)]})\n self.comp2.write({'route_ids': [(4, mto_route.id, None)]})\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # Nothing should be tracked\n self.assertFalse(picking_receipt.display_action_record_components)\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(mo.state, 'confirmed')\n\n picking_delivery = mo.picking_ids\n self.assertEqual(len(picking_delivery), 1)\n self.assertEqual(len(picking_delivery.move_lines), 2)\n self.assertEqual(picking_delivery.origin, picking_receipt.name)\n self.assertEqual(picking_delivery.partner_id, picking_receipt.partner_id)\n\n # The picking should be a delivery order\n wh = picking_receipt.picking_type_id.warehouse_id\n self.assertEqual(mo.picking_ids.picking_type_id, wh.out_type_id)\n\n self.assertEqual(mo.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mo.picking_type_id.active)\n\n # As well as a manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 1)\n picking_receipt.move_lines.quantity_done = 1\n picking_receipt.button_validate()\n self.assertEqual(mo.state, 'done')\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -1)\n self.assertEqual(avail_qty_comp2, -1)\n self.assertEqual(avail_qty_finished, 1)", "def test_change_quantity_before_checkout(self):\n self.winniethepooh = Client()\n User.objects.create(email=\"winnie@thepooh.com\")\n user = User.objects.get(email=\"winnie@thepooh.com\")\n self.winniethepooh.force_login(user)\n Cart.objects.create(\n phone_model_item=self.samsung_note_5_rose_gold,\n quantity=2, owner=user)\n cart = Cart.objects.get(\n phone_model_item=self.samsung_note_5_rose_gold,\n quantity=2, owner=user)\n Cart.objects.create(\n phone_model_item=self.samsung_note_7_rose_gold,\n quantity=1, owner=user)\n cart_2 = Cart.objects.get(\n phone_model_item=self.samsung_note_7_rose_gold,\n quantity=1, owner=user)\n response = self.winniethepooh.get(\"/before_checkout\")\n self.assertContains(response, \"Samsung Note 5\")\n self.assertContains(response, \"50,000\")\n self.assertContains(response, \"75,000\")\n cart_2_data = {\"quantity\": 2, \"cart_id_quantity\": cart_2.id}\n self.winniethepooh.post(\"/before_checkout\", cart_2_data)\n data = {\"quantity\": 3, \"cart_id_quantity\": cart.id}\n after_save_response = self.winniethepooh.post(\n \"/before_checkout\", data)\n self.assertEqual(after_save_response.status_code, 200)\n self.assertContains(after_save_response, \"75,000\")\n self.assertContains(after_save_response, \"125,000\")", "def test_partial_allocation(self):\n\n # Fully allocate tracked stock against build output 1\n self.allocate_stock(\n self.output_1,\n {\n self.stock_3_1: 6,\n }\n )\n\n self.assertTrue(self.build.isFullyAllocated(self.output_1))\n\n # Partially allocate tracked stock against build output 2\n self.allocate_stock(\n self.output_2,\n {\n self.stock_3_1: 1,\n }\n )\n\n self.assertFalse(self.build.isFullyAllocated(self.output_2))\n\n # Partially allocate untracked stock against build\n self.allocate_stock(\n None,\n {\n self.stock_1_1: 1,\n self.stock_2_1: 1\n }\n )\n\n self.assertFalse(self.build.isFullyAllocated(None, verbose=True))\n\n unallocated = self.build.unallocatedParts(None)\n\n self.assertEqual(len(unallocated), 2)\n\n self.allocate_stock(\n None,\n {\n self.stock_1_2: 100,\n }\n )\n\n self.assertFalse(self.build.isFullyAllocated(None, verbose=True))\n\n unallocated = self.build.unallocatedParts(None)\n\n self.assertEqual(len(unallocated), 1)\n\n self.build.unallocateUntracked()\n\n unallocated = self.build.unallocatedParts(None)\n\n self.assertEqual(len(unallocated), 2)\n\n self.assertFalse(self.build.areUntrackedPartsFullyAllocated())\n\n # Now we \"fully\" allocate the untracked untracked items\n self.allocate_stock(\n None,\n {\n self.stock_1_1: 50,\n self.stock_2_1: 50,\n }\n )\n\n self.assertTrue(self.build.areUntrackedPartsFullyAllocated())", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def _is_order_filled(self):\r\n if self.filled_quantity == self.quantity:\r\n self.order_finish()", "def test_transfer_split(self):\n\n transfer = Transfer(src=Well(), dest=Well(), volume=1000) # millileter\n\n max_volume = 50.0\n split = transfer.split(max_volume, 1.0)\n self.assertEqual(20, len(split))\n self.assertTrue(all(t.volume and t.volume <= max_volume for t in split))", "def test_buy_invalid_quantity_big(self, *_):\n self.open(base_url + '/logout')\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"test_frontend@test.com\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open home page\n self.open(base_url)\n # Type invalid ticket name\n self.type(\"#name_buy\", \"Test name\")\n # Type in ticket quantity\n self.type(\"#quantity_buy\", \"101\")\n # click submit button for buy\n self.click(\"#submit-buy\")\n # make sure it shows proper error message\n self.assert_element(\"#message\")\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")", "def test_needed_funds_filled(self):\n account = AccountFactory()\n base_account_cash = account.cash\n order = OrderFactory(side=LONG, account=account)\n order.save_to_db()\n new_account_cash = account.cash\n\n assert order.needed_funds is not None\n assert new_account_cash == base_account_cash - order.needed_funds", "def test_generate_04_generate_in_multiple_time(self):\n nbre_of_lines = 10\n move = self.get_new_move(nbre_of_lines)\n\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n # First assignment\n form_wizard.next_serial_count = 3\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n # Second assignment\n form_wizard.next_serial_count = 2\n form_wizard.next_serial_number = 'bilou-64'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n # Third assignment\n form_wizard.next_serial_count = 4\n form_wizard.next_serial_number = 'ro-1337-bot'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n # Checks all move lines have the right SN\n generated_numbers = [\n # Correspond to the first assignment\n '001', '002', '003',\n # Correspond to the second assignment\n 'bilou-64', 'bilou-65',\n # Correspond to the third assignment\n 'ro-1337-bot', 'ro-1338-bot', 'ro-1339-bot', 'ro-1340-bot',\n ]\n self.assertEqual(len(move.move_line_ids), nbre_of_lines + len(generated_numbers))\n self.assertEqual(len(move.move_line_nosuggest_ids), len(generated_numbers))\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n self.assertEqual(move_line.lot_name, generated_numbers.pop(0))\n for move_line in (move.move_line_ids - move.move_line_nosuggest_ids):\n self.assertEqual(move_line.qty_done, 0)\n self.assertEqual(move_line.lot_name, False)", "def transfer_multi_chain(self, candidate, quantity):\n assert isinstance(candidate, Candidate), type(candidate)\n assert isinstance(quantity, Quantity), type(quantity)\n\n if self.balance() >= quantity:\n byte_quantity = int(quantity)\n self.multi_chain_community.schedule_block(candidate, -byte_quantity, byte_quantity)\n else:\n raise InsufficientFunds()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure a kit is split in the corrects product_qty by components in case of a planned transfer.
def test_kit_planned_transfer(self): picking = self.env['stock.picking'].create({ 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, 'partner_id': self.test_partner.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'immediate_transfer': False, }) move_receipt_1 = self.env['stock.move'].create({ 'name': self.kit_parent.name, 'product_id': self.kit_parent.id, 'product_uom_qty': 3, 'product_uom': self.kit_parent.uom_id.id, 'picking_id': picking.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, }) picking.action_confirm() # We check that the picking has the correct quantities after its move were splitted. self.assertEqual(len(picking.move_lines), 7) for move_line in picking.move_lines: self.assertEqual(move_line.product_qty, self.expected_quantities[move_line.product_id])
[ "def _check_overprocessed_subcontract_qty(self):\n overprocessed_moves = self.env['stock.move']\n for move in self:\n if not move.is_subcontract:\n continue\n # Extra quantity is allowed when components do not need to be register\n if not move._has_tracked_subcontract_components():\n continue\n rounding = move.product_uom.rounding\n if float_compare(move.quantity_done, move.move_orig_ids.production_id.qty_produced, precision_rounding=rounding) > 0:\n overprocessed_moves |= move\n if overprocessed_moves:\n raise UserError(_(\"\"\"\nYou have to use 'Records Components' button in order to register quantity for a\nsubcontracted product(s) with tracked component(s):\n %s.\nIf you want to process more than initially planned, you\ncan use the edit + unlock buttons in order to adapt the initial demand on the\noperations.\"\"\") % ('\\n'.join(overprocessed_moves.mapped('product_id.display_name'))))", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def _compute_qty(self):\n uom_obj = self.env['product.uom']\n move_obj = self.env['stock.move']\n pol_obj = self.env['purchase.order.line']\n pol_rm_rcs = pol_obj.search([('wo_rm_subcontracting_id', '=', self.wo_id.id), \n ('product_id', '=', self.product_id.id), \n ('purchase_state', 'in', ('draft', 'waiting'))])\n \n move_rm_rcs = move_obj.search([('wo_rm_subcontracting_id', '=', self.wo_id.id), \n ('product_id', '=', self.product_id.id), \n ('state', '!=', 'cancel'),\n '|', ('type_subcontracting', '!=', 'indirect'), \n '&', ('type_subcontracting', '=', 'indirect'), ('type', '=', 'out')])\n ordered_qty = 0\n taken_fromstock_qty = 0\n out_qty = 0\n soon_out_qty = 0\n # Calcule de la qté commandée, prise sur le stock et envoyé\n for pol_rm in pol_rm_rcs:\n qty = uom_obj._compute_qty_obj(pol_rm.uom_id, pol_rm.uom_qty, self.rm_fp_draft_id.uom_id, with_raise=True, with_round=False)\n soon_out_qty += qty\n if pol_rm.type_subcontracting == 'stock':\n taken_fromstock_qty += qty\n elif pol_rm.type_subcontracting == 'indirect':\n ordered_qty += qty\n else:\n ordered_qty += qty\n \n \n for move_rm in move_rm_rcs:\n qty = uom_obj._compute_qty_obj(move_rm.uom_id, move_rm.uom_qty, self.rm_fp_draft_id.uom_id, with_raise=True, with_round=False)\n out_qty += qty\n if move_rm.type_subcontracting == 'stock':\n taken_fromstock_qty += qty\n elif move_rm.type_subcontracting == 'indirect':\n ordered_qty += qty\n else:\n ordered_qty += qty \n \n # Si c'est le dernier OT de l'OF, déclaration du produit final\n# TODO THOMAS: commenté car non utilisé. À supprimer ulterieurement\n# last_wo = False\n# if not self.wo_id.next_wo_ids:\n# last_wo = True\n# else:\n# last_wo = True\n# for next_wo in self.wo_id.next_wo_ids:\n# if next_wo.mo_id.id == self.wo_id.mo_id.id:\n# last_wo = False\n# break\n# \n# if last_wo:\n# product_fp_id = self.wo_id.final_product_id.id\n# else:\n# product_fp_id = False\n# for fp in self.wo_id.fp_draft_ids:\n# if fp.product_id.is_int:\n# product_fp_id = fp.product_id.id\n \n # Calcule de la qté déjà utilisée et la déduction des qtés déjà achetées mais non utilisées\n pol_fp_rcs = pol_obj.search([('wo_fp_subcontracting_id', '=', self.wo_id.id)])\n qty_product_final_use = 0\n po_ids = []\n for pol_fp in pol_fp_rcs:\n if pol_fp.purchase_order_id.id not in po_ids and pol_fp.purchase_state != 'cancel':\n po_ids.append(pol_fp.purchase_order_id.id)\n qty_product_final_use += pol_fp.wo_qty\n \n wo_in_use_qty = self.rm_fp_draft_id.efficient_unit_qty * qty_product_final_use\n # Initialisation des champs fonctions\n self.virtual_stock = self.product_id.virtual_stock\n self.wo_total_ordered_qty = ordered_qty\n self.taken_fromstock_qty = taken_fromstock_qty\n self.wo_total_remaining_qty = (self.rm_fp_draft_id.efficient_unit_qty*self.wo_id.quantity) - ordered_qty - taken_fromstock_qty\n if self.rm_fp_draft_id.type_qty == 'variable':\n need_qty = self.rm_fp_draft_id.efficient_unit_qty * self.wiz_qty or self.wiz_qty \n else:\n need_qty = self.rm_fp_draft_id.uom_qty\n \n self.need_qty = need_qty\n self.uom_id = self.rm_fp_draft_id and self.rm_fp_draft_id.uom_id.id or self.product_id.uom_id.id \n self.wo_delivred_qty = out_qty\n self.wo_soon_delivred_qty = soon_out_qty\n self.forecast_delivred_qty = self.wo_delivred_qty + self.wo_soon_delivred_qty\n self.wo_in_use_qty = wo_in_use_qty\n if self.need_qty < ((self.wo_delivred_qty+self.wo_soon_delivred_qty)-self.wo_in_use_qty):\n self.wo_remaining_need_qty = 0\n else:\n self.wo_remaining_need_qty = self.need_qty - ((self.wo_delivred_qty+self.wo_soon_delivred_qty) - self.wo_in_use_qty)\n \n # Calcule des quantités libres dans l'emplacement de st\n warehouse_rs = self.wo_id.first_resource_id and self.wo_id.first_resource_id.location_id.warehouse_id or False\n if warehouse_rs:\n self.release_virtual_qty, self.release_qty = self.product_id.get_available_stock(return_real_stock=True, warehouse_id=warehouse_rs.id)\n else:\n self.release_qty = 0.0\n self.release_virtual_qty = 0.0", "def test_purchase_order_product_bundle(self):\n self.purchase = self.env['purchase.order'].with_user(self.purchase_user).create(self.order_vals)\n self.assertTrue(self.purchase, 'Purchase: no purchase order created')\n self.assertEqual(self.purchase.invoice_status, 'no', 'Purchase: PO invoice_status should be \"Not purchased\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_received'), [0.0, 0.0], 'Purchase: no product should be received\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [0.0, 0.0], 'Purchase: no product should be invoiced\"')\n\n self.purchase.button_confirm()\n self.assertEqual(self.purchase.state, 'purchase', 'Purchase: PO state should be \"Purchase\"')\n self.assertEqual(self.purchase.invoice_status, 'to invoice', 'Purchase: PO invoice_status should be \"Waiting Invoices\"')\n\n self.assertEqual(self.purchase.picking_count, 1, 'Purchase: one picking should be created\"')\n self.picking = self.purchase.picking_ids[0]\n self.picking.move_line_ids.write({'qty_done': 1.0})\n self.picking.button_validate()\n\n product_bundle_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_bundle_id)\n product_3_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_3)\n self.bundle_order_qty = sum(product_bundle_line.mapped('product_uom_qty'))\n self.product_3_order_qty = sum(product_3_line.mapped('product_uom_qty'))\n self.total_bundle_order_qty = self.count_item_pack *self.bundle_order_qty\n\n self.assertEqual(self.bundle_order_qty, 1, 'Purchase: product bundle ordered quantity')\n self.assertEqual(self.total_bundle_order_qty, 3, 'Purchase: product bundle total quantity')\n self.assertEqual(self.product_3_order_qty, 1, 'Purchase: product Samsung S20 ordered quantity')\n self.assertEqual(product_bundle_line.mapped('qty_received'), [self.total_bundle_order_qty], 'Purchase: the product bundle should be received\"')\n self.assertEqual(product_3_line.mapped('qty_received'), [self.product_3_order_qty], 'Purchase: the product samsung S20 should be received\"')\n \n move_form = Form(self.env['account.move'].with_context(default_move_type='in_invoice'))\n move_form.partner_id = self.vendor\n move_form.purchase_id = self.purchase\n self.bill = move_form.save()\n\n # Control Policy products is On ordered quantities\n # self.bundle_order_qty = 1\n # self.product_3_order_qty = 1\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [1, 1], 'Purchase: all products should be invoiced based on ordered quantity\"')", "def test_flow_tracked_only_finished(self):\n self.finished_product.tracking = \"serial\"\n self.comp1_sn.tracking = \"none\"\n nb_finished_product = 3\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished_product\n move.product_uom_qty = nb_finished_product\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n # We shouldn't be able to call the 'record_components' button\n self.assertFalse(picking_receipt.display_action_record_components)\n\n wh = picking_receipt.picking_type_id.warehouse_id\n lot_names_finished = [f\"subtracked_{i}\" for i in range(nb_finished_product)]\n\n move_details = Form(picking_receipt.move_lines, view='stock.view_stock_move_nosuggest_operations')\n for lot_name in lot_names_finished:\n with move_details.move_line_nosuggest_ids.new() as ml:\n ml.qty_done = 1\n ml.lot_name = lot_name\n move_details.save()\n\n picking_receipt.button_validate()\n # Check the created manufacturing order\n # Should have one mo by serial number\n mos = picking_receipt.move_lines.move_orig_ids.production_id\n self.assertEqual(len(mos), nb_finished_product)\n self.assertEqual(mos.mapped(\"state\"), [\"done\"] * nb_finished_product)\n self.assertEqual(mos.picking_type_id, wh.subcontracting_type_id)\n self.assertFalse(mos.picking_type_id.active)\n self.assertEqual(set(mos.lot_producing_id.mapped(\"name\")), set(lot_names_finished))\n\n # Available quantities should be negative at the subcontracting location for each components\n avail_qty_comp1 = self.env['stock.quant']._get_available_quantity(self.comp1_sn, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_comp2 = self.env['stock.quant']._get_available_quantity(self.comp2, self.subcontractor_partner1.property_stock_subcontractor, allow_negative=True)\n avail_qty_finished = self.env['stock.quant']._get_available_quantity(self.finished_product, wh.lot_stock_id)\n self.assertEqual(avail_qty_comp1, -nb_finished_product)\n self.assertEqual(avail_qty_comp2, -nb_finished_product)\n self.assertEqual(avail_qty_finished, nb_finished_product)", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_split_report_bad_input(self):\n purchase_quantity = 1.0\n buy = transaction_utils.make_transaction(BUY, purchase_quantity, 0, 100.0)\n sell = transaction_utils.make_transaction(SELL, 2.0, 0, 100.0)\n with self.assertRaises(AssertionError):\n basis._split_report(\n buy, purchase_quantity, sell\n ) # Should not split the basis coin, quantity matches", "def calc_qty(self, cr, uid, production_id, context=None):\n prod = self.pool.get('mrp.production').browse(cr, uid,production_id\n , context=context)\n done = 0.0\n for wo in prod.workcenter_lines:\n for mrej in wo.moves_rejection:\n done += mrej.s_rejected_qty or 0.0\n for move in prod.move_created_ids2:\n if move.product_id == prod.product_id:\n #ignore scrapped and extra consumed\n if (not move.scrapped) or (not move.extra_consumed):\n done += move.product_qty\n if (prod.product_qty - done) <= 0:\n raise osv.except_osv(_('Warning!'), _('Click on \"Force To Close\" button to generate remain scrap order.'))\n return (prod.product_qty - done) or prod.product_qty", "def test_quantity_price_productvariation(self):\n\n # base product\n product = Product.objects.get(slug=\"dj-rocks\")\n self.assertEqual(product.unit_price, Decimal(\"20.00\"))\n self.assertEqual(product.unit_price, product.get_qty_price(1))\n\n # product with no price delta\n product = Product.objects.get(slug=\"dj-rocks-s-b\")\n self.assertEqual(product.unit_price, Decimal(\"20.00\"))\n self.assertEqual(product.unit_price, product.get_qty_price(1))\n\n # product which costs more due to details\n product = Product.objects.get(slug=\"dj-rocks-l-bl\")\n self.assertEqual(product.unit_price, Decimal(\"23.00\"))\n self.assertEqual(product.unit_price, product.get_qty_price(1))", "def test_flow_4(self):\n # Tick \"manufacture\" and MTO on self.comp2\n mto_route = self.env.ref('stock.route_warehouse0_mto')\n mto_route.active = True\n manufacture_route = self.env['stock.location.route'].search([('name', '=', 'Manufacture')])\n self.comp2.write({'route_ids': [(4, manufacture_route.id, None)]})\n self.comp2.write({'route_ids': [(4, mto_route.id, None)]})\n\n orderpoint_form = Form(self.env['stock.warehouse.orderpoint'])\n orderpoint_form.product_id = self.comp2\n orderpoint_form.product_min_qty = 0.0\n orderpoint_form.product_max_qty = 10.0\n orderpoint_form.location_id = self.env.company.subcontracting_location_id\n orderpoint = orderpoint_form.save()\n\n # Create a receipt picking from the subcontractor\n picking_form = Form(self.env['stock.picking'])\n picking_form.picking_type_id = self.env.ref('stock.picking_type_in')\n picking_form.partner_id = self.subcontractor_partner1\n with picking_form.move_ids_without_package.new() as move:\n move.product_id = self.finished\n move.product_uom_qty = 1\n picking_receipt = picking_form.save()\n picking_receipt.action_confirm()\n\n warehouse = picking_receipt.picking_type_id.warehouse_id\n\n # Pickings should directly be created\n mo = self.env['mrp.production'].search([('bom_id', '=', self.bom.id)])\n self.assertEqual(mo.state, 'confirmed')\n\n picking_delivery = mo.picking_ids\n self.assertFalse(picking_delivery)\n\n picking_delivery = self.env['stock.picking'].search([('origin', 'ilike', '%' + picking_receipt.name + '%')])\n self.assertFalse(picking_delivery)\n\n move = self.env['stock.move'].search([\n ('product_id', '=', self.comp2.id),\n ('location_id', '=', warehouse.lot_stock_id.id),\n ('location_dest_id', '=', self.env.company.subcontracting_location_id.id)\n ])\n self.assertTrue(move)\n picking_delivery = move.picking_id\n self.assertTrue(picking_delivery)\n self.assertEqual(move.product_uom_qty, 11.0)\n\n # As well as a manufacturing order for `self.comp2`\n comp2mo = self.env['mrp.production'].search([('bom_id', '=', self.comp2_bom.id)])\n self.assertEqual(len(comp2mo), 1)", "def parts_demand(request):\n critical_part = []\n quantity = None\n bom_name = None\n if request.method == 'POST':\n form = PartsDemandForm(request.POST)\n if form.is_valid():\n bom_name = form.cleaned_data['bom']\n quantity = int(form.cleaned_data['quantity'])\n warehouse = form.cleaned_data['warehouse']\n warehouse_obj = Warehouse.objects.get(warehouse_name=warehouse)\n stock = calculate_stock(warehouse_obj)\n parts = get_bom_parts(bom_name)\n print(stock)\n for part in parts:\n part_qty = float(part['Qty'])\n part_name = part['PartName']\n part_number = part['PartNumber']\n if stock.get(part_name):\n av_stock = stock.get(part_name)['total_usable_stock']\n # print(av_stock, quantity, part_qty, quantity * part_qty)\n else:\n av_stock = 0\n critical = int(av_stock) - int(quantity * part_qty)\n if critical <= 0:\n test = {\n \"critical_qty\": critical,\n \"part_number\": part_number,\n \"part_name\": part_name\n }\n critical_part.append(test)\n else:\n form = PartsDemandForm()\n context = {\n 'form': form,\n 'critical_part': critical_part,\n 'quantity': quantity,\n 'bom': bom_name,\n }\n\n return render(request, 'inventory/parts_demand.html', context)", "def test_kit_immediate_transfer(self):\n picking = self.env['stock.picking'].create({\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n 'partner_id': self.test_partner.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'immediate_transfer': True\n })\n move_receipt_1 = self.env['stock.move'].create({\n 'name': self.kit_parent.name,\n 'product_id': self.kit_parent.id,\n 'quantity_done': 3,\n 'product_uom': self.kit_parent.uom_id.id,\n 'picking_id': picking.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n })\n picking.button_validate()\n\n # We check that the picking has the correct quantities after its move were splitted.\n self.assertEqual(len(picking.move_lines), 7)\n for move_line in picking.move_lines:\n self.assertEqual(move_line.quantity_done, self.expected_quantities[move_line.product_id])", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def test_buy_invalid_quantity_big(self, *_):\n self.open(base_url + '/logout')\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"test_frontend@test.com\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open home page\n self.open(base_url)\n # Type invalid ticket name\n self.type(\"#name_buy\", \"Test name\")\n # Type in ticket quantity\n self.type(\"#quantity_buy\", \"101\")\n # click submit button for buy\n self.click(\"#submit-buy\")\n # make sure it shows proper error message\n self.assert_element(\"#message\")\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")", "def test_product_bundle_price_calculation(self):\n template = self.product_apple_bundle\n template.write({'is_calpack_price': False})\n template.write({'is_calpack_price': True})\n self.assertEqual(template.list_price, self.total_price, 'Product: a product bundle canculation sale price')\n self.assertEqual(template.standard_price, self.total_cost, 'Product: a product bundle canculation product cost')", "def test_sell_ticket_valid_quantity(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"test_frontend@test.com\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"-1\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"test_frontend@test.com\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"101\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")", "def validate_params_qty(self, command, params_qty, valid_qty):\n\n if params_qty in valid_qty:\n return True\n\n return False", "def test_add_new_product_with_min_quantity_missing(self):\n response = self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json={\n 'product_name': \"Hammer\",\n 'product_price': 300, 'inventory': 10, 'category': self.category_id\n }, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(common_functions.convert_response_to_json(\n response)['message'],\n 'Please define the minimum quantity')", "def test_bundle_is_product_pack(self):\n template = self.product_apple_bundle\n product_pack_ids = template.product_pack_ids\n self.assertTrue(template.is_pack, 'Product template is a bundle pack')\n self.assertTrue(len(product_pack_ids) != 0, 'Product: a product bundle should have product pack')\n self.assertEqual(len(product_pack_ids), 3, 'Product: a product bundle should have product pack')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ability that deals damage to the target
def ability_1(self,target): damage = (self.get_strength()+2) target.receive_damage(damage)
[ "def ability_3(self,target):\r\n damage = (self.get_dexterity()+self.get_strength())\r\n target.receive_damage(damage)", "def deal_damage(self, target):\n if hasattr(target, \"hp\"):\n dmg = random.randrange(self.atk + 1)\n target.take_damage(dmg)\n return dmg", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk", "def throw_at(self, target):\n if target is not None:\n target.reduce_armor(self.damage)", "def damage(self):\n self.damaged = True", "def attack(self, target:'Character'):\n print(self.attack_speech)\n weapon = self.equipped_weapon\n dmg = weapon.damage\n dmg_type = weapon.damage_type\n if target.health <= 0:\n print(\"{} is already dead!\".format(target.name))\n return False\n else:\n if randrange(0, 100) <= weapon.hit_chance:\n dmg = weapon.damage + randrange(-1, 1)\n print(\"Hit!\")\n target.take_damage(dmg, weapon.damage_type)\n return True\n else:\n print(\"Miss!\")\n return False", "def can_attack(self, target):\n return True", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def attack(self, target):\n damage = random.choice(self.damage_range)\n attack = generics.Attack(self, damage)\n try:\n target.take_attack(attack)\n except exceptions.DeathException as death:\n self.on_win(target)\n raise death\n return attack", "def take_damage(self):\n self.health -= 1", "def attack(self):\n pass", "def damage_handle(self, hp_modifier: int):\r\n health = self.health\r\n health += hp_modifier\r\n if health > 100:\r\n health -= (health - 100)\r\n elif health == 0:\r\n print(\"Oh no! You have died!\")", "def apply_ability_effects(ability: dict, target: Player, self: Player) -> None:\n for effect in ability[\"effects\"]:\n if effect[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=target\n )\n elif effect[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=self\n )", "def attack(self, target_robot):\n self._make_damage(target_robot)\n self._make_damage(target_robot)", "def ship_took_damage(self, damage: Damage):\n pass", "def player_damage(self, enemy):\n max_dmg, min_dmg = self.weapon.damagerange()\n base_damage = random.randint(min_dmg, max_dmg)\n multiplier = self.weapon.orb.damagemultiplier[enemy.orb.name]\n\n # this is printed to give the player some idea of how orbs\n # affect other orbs\n if multiplier > 1:\n print(\"{} is strong against {}!\".format(self.weapon.orb.name, enemy.orb.name))\n elif multiplier < 1:\n print(\"{} is weak against {}!\".format(self.weapon.orb.name, enemy.orb.name))\n else:\n print(\"You use {} against {}!\".format(self.weapon.name, enemy.name))\n\n damage = round(base_damage * multiplier,1)\n enemy.hp = round(enemy.hp - damage,1)\n\n print(\"You did {} damage.\".format(damage))", "def take_damage(self, dmg, dtype = 1):\n self.game.hit_sound.play()\n \n #DR% = 1 - (100 / x). \n damageMultiplier = 100.0 / float(self.defense)\n #Apply defense buffs/debuffs\n #calculate damage:\n dmg -= self.absorbtion\n dmg *= damageMultiplier\n #apply damage\n self.hp[0] -= dmg", "def attacked_by_hero(self):\n self.health = self.health - Hero.attack_strength", "def cast_ability(self, ability_id, target, map):\n # Remove current casting\n self.casting = None\n\n if not target:\n raise InvalidTargetException\n elif target.dead:\n raise InvalidTargetException\n\n # Check if we can use the ability\n self.can_use_ability(ability_id, True)\n\n # Check if we are in range\n if not map.in_vision_of(self.position,\n target.position,\n gameConstants.abilitiesList[ability_id][\"Range\"]):\n raise OutOfRangeException\n\n # Get ability json\n ability = copy.deepcopy(gameConstants.abilitiesList[ability_id])\n\n # Apply cool down\n self.abilities[ability_id] = ability[\"Cooldown\"]\n\n # Iterate through stat changes\n for stat_change in ability['StatChanges']:\n if stat_change['Attribute'] == 'Health':\n if stat_change['Change'] > 0:\n stat_change['Change'] = stat_change['Change'] + self.attributes.get_attribute(\"SpellPower\")\n elif stat_change['Change'] < 0:\n stat_change['Change'] = stat_change['Change'] - self.attributes.get_attribute(\"SpellPower\")\n if stat_change['Target'] == 0 and target is self:\n self.add_stat_change(stat_change)\n elif stat_change['Target'] == 1:\n target.add_stat_change(stat_change)\n else:\n raise InvalidTargetException" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ability that deals damage to the target
def ability_3(self,target): damage = (self.get_dexterity()+self.get_strength()) target.receive_damage(damage)
[ "def ability_1(self,target):\r\n damage = (self.get_strength()+2)\r\n target.receive_damage(damage)", "def deal_damage(self, target):\n if hasattr(target, \"hp\"):\n dmg = random.randrange(self.atk + 1)\n target.take_damage(dmg)\n return dmg", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk", "def throw_at(self, target):\n if target is not None:\n target.reduce_armor(self.damage)", "def damage(self):\n self.damaged = True", "def attack(self, target:'Character'):\n print(self.attack_speech)\n weapon = self.equipped_weapon\n dmg = weapon.damage\n dmg_type = weapon.damage_type\n if target.health <= 0:\n print(\"{} is already dead!\".format(target.name))\n return False\n else:\n if randrange(0, 100) <= weapon.hit_chance:\n dmg = weapon.damage + randrange(-1, 1)\n print(\"Hit!\")\n target.take_damage(dmg, weapon.damage_type)\n return True\n else:\n print(\"Miss!\")\n return False", "def can_attack(self, target):\n return True", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def attack(self, target):\n damage = random.choice(self.damage_range)\n attack = generics.Attack(self, damage)\n try:\n target.take_attack(attack)\n except exceptions.DeathException as death:\n self.on_win(target)\n raise death\n return attack", "def take_damage(self):\n self.health -= 1", "def attack(self):\n pass", "def damage_handle(self, hp_modifier: int):\r\n health = self.health\r\n health += hp_modifier\r\n if health > 100:\r\n health -= (health - 100)\r\n elif health == 0:\r\n print(\"Oh no! You have died!\")", "def apply_ability_effects(ability: dict, target: Player, self: Player) -> None:\n for effect in ability[\"effects\"]:\n if effect[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=target\n )\n elif effect[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=self\n )", "def attack(self, target_robot):\n self._make_damage(target_robot)\n self._make_damage(target_robot)", "def ship_took_damage(self, damage: Damage):\n pass", "def player_damage(self, enemy):\n max_dmg, min_dmg = self.weapon.damagerange()\n base_damage = random.randint(min_dmg, max_dmg)\n multiplier = self.weapon.orb.damagemultiplier[enemy.orb.name]\n\n # this is printed to give the player some idea of how orbs\n # affect other orbs\n if multiplier > 1:\n print(\"{} is strong against {}!\".format(self.weapon.orb.name, enemy.orb.name))\n elif multiplier < 1:\n print(\"{} is weak against {}!\".format(self.weapon.orb.name, enemy.orb.name))\n else:\n print(\"You use {} against {}!\".format(self.weapon.name, enemy.name))\n\n damage = round(base_damage * multiplier,1)\n enemy.hp = round(enemy.hp - damage,1)\n\n print(\"You did {} damage.\".format(damage))", "def take_damage(self, dmg, dtype = 1):\n self.game.hit_sound.play()\n \n #DR% = 1 - (100 / x). \n damageMultiplier = 100.0 / float(self.defense)\n #Apply defense buffs/debuffs\n #calculate damage:\n dmg -= self.absorbtion\n dmg *= damageMultiplier\n #apply damage\n self.hp[0] -= dmg", "def attacked_by_hero(self):\n self.health = self.health - Hero.attack_strength", "def cast_ability(self, ability_id, target, map):\n # Remove current casting\n self.casting = None\n\n if not target:\n raise InvalidTargetException\n elif target.dead:\n raise InvalidTargetException\n\n # Check if we can use the ability\n self.can_use_ability(ability_id, True)\n\n # Check if we are in range\n if not map.in_vision_of(self.position,\n target.position,\n gameConstants.abilitiesList[ability_id][\"Range\"]):\n raise OutOfRangeException\n\n # Get ability json\n ability = copy.deepcopy(gameConstants.abilitiesList[ability_id])\n\n # Apply cool down\n self.abilities[ability_id] = ability[\"Cooldown\"]\n\n # Iterate through stat changes\n for stat_change in ability['StatChanges']:\n if stat_change['Attribute'] == 'Health':\n if stat_change['Change'] > 0:\n stat_change['Change'] = stat_change['Change'] + self.attributes.get_attribute(\"SpellPower\")\n elif stat_change['Change'] < 0:\n stat_change['Change'] = stat_change['Change'] - self.attributes.get_attribute(\"SpellPower\")\n if stat_change['Target'] == 0 and target is self:\n self.add_stat_change(stat_change)\n elif stat_change['Target'] == 1:\n target.add_stat_change(stat_change)\n else:\n raise InvalidTargetException" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path of the ocamlmerlin binary."
def merlin_bin(): user_settings = sublime.load_settings("Merlin.sublime-settings") merlin_path = user_settings.get('ocamlmerlin_path') if merlin_path: return merlin_path # For Mac OS X, add the path for homebrew if "/usr/local/bin" not in os.environ['PATH'].split(os.pathsep): os.environ['PATH'] += os.pathsep + "/usr/local/bin" opam_process = subprocess.Popen('opam config var bin', stdout=subprocess.PIPE, shell=True) opam_bin_path = opam_process.stdout.read().decode('utf-8').rstrip() + '/ocamlmerlin' if os.path.isfile(opam_bin_path) and os.access(opam_bin_path, os.X_OK): return opam_bin_path else: return 'ocamlmerlin'
[ "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def binpath (self):\n return self._basepath + '.bin'", "def BinaryPath(name):\n return os.path.join(OLDISIM_DIR, BINARY_BASE, name)", "def dir_bin():\n return abspath('bin')", "def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))", "def archbin(self):\n return join_path(\"platforms\", self.foam_arch, \"bin\")", "def mitogen_buildah_path(self):", "def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"", "def get_main_module_path():\n program = sys.argv[0]\n path = os.path.realpath(program)\n if program != \"\":\n path = os.path.split(path)[0]\n return path", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def mrjob_pythonpath():\n return os.path.abspath(\n os.path.join(os.path.dirname(mrjob.__file__), '..'))", "def get_pythainlp_path() -> str:\n return os.path.dirname(pythainlp.__file__)", "def getBinary():\n binary = shutil.which(_ROCKETLOGGER_BINARY)\n\n if not os.path.exists(binary):\n raise FileNotFoundError(f\"Could not find RocketLogger CLI binary! [{binary}]\")\n return os.path.abspath(binary)", "def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")", "def glance_bin_root():\n return str(values.get(\"glance_code_root\")) + \"/bin/\"", "def get_target_binary():\n file_location = prompt_base(\"where is the file located?\")\n file_location = os.path.abspath(file_location)\n return file_location", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def binary_location(cmd, USE_PATH=False):\n if USE_PATH:\n return cmd\n else:\n return os.path.join(BIN_PREFIX, cmd)", "def GetKratosMultiphysicsPath():\n return os.path.dirname(KM.__file__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the current view is an OCaml source code.
def is_ocaml(view): ocaml = 'source.ocaml' mlfi = 'source.mlfi' location = view.sel()[0].begin() return view.match_selector(location, ocaml) or view.match_selector(location, mlfi)
[ "def test_non_js_source(self):\n self.view.set_syntax_file(\"Packages/Python/Python.tmLanguage\")\n\n actual = is_js_source(self.view)\n\n self.assertFalse(actual)", "def set_code_viewer_is_source(*args):\n return _ida_kernwin.set_code_viewer_is_source(*args)", "def is_python():\n return has_source(r'.*\\.py$')", "def _is_python_editor(self, codeeditor):\n if codeeditor.filename is None:\n return False\n txt = codeeditor.get_text_with_eol()\n language = get_file_language(codeeditor.filename, txt)\n return language.lower() in ALL_LANGUAGES[\"Python\"]", "def is_pysource(fname):\n return not fname.startswith('.') and fname.endswith(('.py', '.pyw'))", "def is_go_source(cls, path):\n return path.endswith('.go') and os.path.isfile(path)", "def _is_source_code(self, commit):\n extension_set = set()\n for file in commit['files']:\n extension_set.add(self._get_extension(file))\n\n if self.source_code_exclude_list is None or len(extension_set.difference(self.source_code_exclude_list)) > 0:\n return True\n return False", "def test_js_source(self):\n actual = is_js_source(self.view)\n\n self.assertTrue(actual)", "def is_old_code():\r\n code_exists = exists('~/viewfinder')\r\n if not code_exists:\r\n # So such directory or link.\r\n return False\r\n with settings(warn_only=True):\r\n is_link = run('readlink ~/viewfinder')\r\n if is_link.return_code == 0:\r\n # This is a symlink. New-style code.\r\n return False\r\n return True", "def is_markdown():\n return has_source(r'.*\\.md$')", "def is_source(filename):\n\n accepted = {\n '.c', '.cc', '.cp', '.cpp', '.cxx', '.c++', '.m', '.mm', '.i', '.ii',\n '.mii'\n }\n __, ext = os.path.splitext(filename)\n return ext.lower() in accepted", "def is_authoring_source(view):\n if view.match_selector(0, \"text.hyperhelp.help\"):\n return not view.is_read_only()\n\n return False", "def is_python_scope(view, location):\r\n return view.match_selector(location, \"source.python - string - comment\")", "def test_source(self):\n with open(__file__, 'r') as f:\n contents = f.read()\n\n lexer = syntax_highlighting.fetch_lexer(contents)\n self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)", "def is_source_file(path):\n return os.path.splitext(path)[1].lower() in SOURCE_FILE_EXT", "def is_python_scope(view, location):\n return view.match_selector(location, \"source.python - string - comment\")", "def has_code_module(self) -> bool:\n return self.module_info is not None", "def isSource(self):\n \n pass", "def is_by_sources(module):\n return module.location is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the given function if we are in an OCaml source code only.
def only_ocaml(func): @functools.wraps(func) def wrapper(self, view, *args, **kwargs): if is_ocaml(view): return func(self, view, *args, **kwargs) return wrapper
[ "def ast_only_test(func):\n\n def impl(*args, **kwargs):\n if os.environ.get(\"ENABLE_FALL_BACK\", \"False\") == \"False\":\n func(*args, **kwargs)\n\n return impl", "def is_function(f) -> bool:\n return hasattr(f, '__code__')", "def codetest(source, functionname, args):\n from pypy.interpreter import baseobjspace\n from pypy.interpreter import pyframe, gateway, module\n space = Space()\n\n source = str(py.code.Source(source).strip()) + '\\n'\n\n w = space.wrap\n w_code = space.builtin.call('compile', \n w(source), w('<string>'), w('exec'), w(0), w(0))\n\n tempmodule = module.Module(space, w(\"__temp__\"))\n w_glob = tempmodule.w_dict\n space.setitem(w_glob, w(\"__builtins__\"), space.builtin)\n\n code = space.unwrap(w_code)\n code.exec_code(space, w_glob, w_glob)\n\n wrappedargs = [w(a) for a in args]\n wrappedfunc = space.getitem(w_glob, w(functionname))\n def callit():\n return space.call_function(wrappedfunc, *wrappedargs)\n return callit\n try:\n w_output = space.call_function(wrappedfunc, *wrappedargs)\n except baseobjspace.OperationError, e:\n #e.print_detailed_traceback(space)\n return '<<<%s>>>' % e.errorstr(space)\n else:\n return space.unwrap(w_output)", "def run_code(self, source, filename):\n code = compile(source, filename, 'exec')\n try:\n exec code in self.globals\n except KeyboardInterrupt:\n self.world.quit()", "def in_function_code(self):\n return self.lscope is not None and self.sscope is None", "def run(src):\n mod = module(src)\n main = mod.get(\"main\")\n if not main:\n raise RuntimeError('módulo não define uma função \"main()\"')\n main()", "def is_python():\n return has_source(r'.*\\.py$')", "def _has_code_flag(f, flag):\n while ismethod(f):\n f = f.__func__\n f = _unwrap_partial(f)\n if not isfunction(f):\n return False\n return bool(f.__code__.co_flags & flag)", "def shallPreferSourceCodeOverExtensionModules():\n return options is not None and options.prefer_source_code", "def code():", "def Exec_Python(code):\n # pylint: disable=exec-used\n try:\n exec(code, globals())\n # pylint: disable=broad-except\n # pylint: disable=bare-except\n except:\n _LOGGER.error('Execution of following code has failed %s', code)\n return False\n return True", "def _test():\n if sys.argv[1:]:\n if sys.argv[2:]:\n sys.stderr.write(\"usage: python dis.py [-|file]\\n\")\n sys.exit(2)\n fn = sys.argv[1]\n if not fn or fn == \"-\":\n fn = None\n else:\n fn = None\n if fn is None:\n f = sys.stdin\n else:\n f = open(fn)\n source = f.read()\n if fn is not None:\n f.close()\n else:\n fn = \"<stdin>\"\n code = compile(source, fn, \"exec\")\n dis(code)", "def run_python_fn(fn, args, kwargs = None, backend = None):\n # translate from the Python AST to Parakeet's untyped format\n untyped = ast_conversion.translate_function_value(fn)\n return run_untyped_fn(untyped, args, kwargs, backend)", "def gen_function(self, function):\n if function.body:\n self.gen_function_def(function)", "def ExecuteCode(code, global_dict):\n # Indeed, using exec generates a lint warning. But some user code\n # actually uses exec, and we have to test for it ...\n exec code in global_dict", "def run(self):\n\n module_name = os.path.basename(self.absolute_filepath)\n module_name, _ = os.path.splitext(module_name)\n module_object = imp.load_source(module_name, self.absolute_filepath)\n function_name = self.function_name\n\n if not function_name:\n function_name = 'main'\n\n func = getattr(module_object, function_name)\n func(*self.args, **self.kwargs)\n return True", "def visit_Python(self, node):\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bp_code.code[1:-2])", "def foo():\n print('Hello Python')", "def visit_Python(self, node):\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.extend_ops(bp_code.code[1:-2])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a position returned by Merlin to a Sublime text point. Sublime uses character positions and starts each file at line 0.
def merlin_pos(view, pos): return view.text_point(pos['line'] - 1, pos['col'])
[ "def from_position(tu, file, line, column):\r\n return conf.lib.clang_getLocation(tu, file, line, column)", "def point2pos(self, point):\n row = self._vim.eval('byte2line({})'.format(point))\n col = self._vim.eval('{} - line2byte({})'.format(point, row))\n return (int(row), int(col))", "def from_position(tu, file, line, column):\n return conf.lib.clang_getLocation(tu, file, line, column)", "def get_position_tex(event):\n line, column = tex_text.index('insert').split('.')\n s = \"line=%s column=%s\" % (line, column)\n var=s \n app.title(s)", "def getPos(level):\n return str(STARTING_POS[level-1][0]) + \", \" + str(STARTING_POS[level-1][1])", "def position_to_numbers(p):\n return (file_letter_to_num(p[0]), int(p[1]))", "def get_line_contents(view, location):\n return view.substr(sublime.Region(view.line(location).a, location))", "def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)", "def get_position(event):\n line, column = md_text.index('insert').split('.')\n s = \"line=%s column=%s\" % (line, column)\n var=s \n app.title(s)", "def toCoords(piece):\n return(piece.getRank()-1,piece.getFile()-1)", "def source_position(self) -> Tuple[int, int]:\n return self.templated_file.get_line_pos_of_char_pos(\n self.source_slice.start, source=True\n )", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def webotsToScenicPosition(pos):\n x, y, z = pos\n return (x, -z)", "def GetSelection(self):\n # STC HELL\n # Translate the UTF8 byte offsets to unicode\n start, end = super(EditraBaseStc, self).GetSelection()\n utf8_txt = self.GetTextUTF8()\n if start != 0:\n start = len(ed_txt.DecodeString(utf8_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.DecodeString(utf8_txt[0:end], 'utf-8'))\n del utf8_txt\n return start, end", "def extract_point(text):\r\n # TODO: extract_point()\r\n pass", "def line(self):\r\n if self.in_builtin_module():\r\n return None\r\n return self._start_pos[0]", "def _update_pos(fh_genome, p):\n fh_genome.seek(p)\n _ = next(fh_genome)\n a, b, _ = next(fh_genome).split(None, 2)\n return (int(a), int(b))", "def remainder_of_line(self):\n return lisp('''\\\n(buffer-substring (point) (save-excursion (skip-chars-forward \"^\\n\") (point)))\n''')", "def sunpos(*args):\n return _sunpos.sunpos(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
.get_recommendation_display() will return the correct value of the recommendation choice
def test_recommendation_value(self): john_starks = Athlete(first_name="John", last_name="Starks", sport="NBA", recommendation="a") self.assertEqual(john_starks.get_recommendation_display(), "Hire Joe IMMEDIATELY!")
[ "def display_recommendation(self, n=5):\n self.n = n # update the number of recommendations to display\n if len(self.recomm) == 0:\n print(\"Sorry, there is no matching recommendations.\")\n elif self.n < len(self.recomm): # display only the top n from the recommendation list\n print(\"Below is a list of the top {} recommended restaurants for you: \".format(self.n))\n print(self.recomm.iloc[:self.n][self.column_to_display])\n else: # display all if # of recommendations is less than self.n\n print(\"Below is a list of all {} recommended restaurants for you: \".format(len(self.recomm)))\n print(self.recomm[self.column_to_display])", "def __str__(self):\n if self.recommend:\n review = 'recommended by {}: {}'.format(self.reviewer, self.comments)\n else:\n review = 'not recommended by {}: {}'.format(self.reviewer, self.comments)\n\n return review", "def recommendation_name(self) -> str:\n return pulumi.get(self, \"recommendation_name\")", "def propeller_recommendation(self):\n return self.motor.specs['prop_recommendation']", "def handle_recommendation(request):\n ticker = request.get_slot_value(slot_name=\"stockTicker\").upper()\n recommendation = Analyst(ticker).recommendation()\n\n logger.info(\n f\"recommendationMean for {ticker} is {recommendation}\")\n\n # pick right response based on recommendation mean rating\n if recommendation is None:\n message = strings.INTENT_RCMD_NO_RCMD\n elif _in_interval(recommendation, 1, 1.8):\n message = strings.INTENT_RCMD_STRONG_BUY\n elif _in_interval(recommendation, 1.8, 2.2):\n message = strings.INTENT_RCMD_BUY\n elif _in_interval(recommendation, 2.2, 2.8):\n message = strings.INTENT_RCMD_OPT_HOLD\n elif _in_interval(recommendation, 2.8, 3.2):\n message = strings.INTENT_RCMD_HOLD\n elif _in_interval(recommendation, 3.2, 3.8):\n message = strings.INTENT_RCMD_PES_HOLD\n elif _in_interval(recommendation, 3.8, 4.2):\n message = strings.INTENT_RCMD_SELL\n elif _in_interval(recommendation, 4.2, 5):\n message = strings.INTENT_RCMD_STRONG_SELL\n\n response = ResponseBuilder.create_response(request, message=message)\n reprompt_message = strings.INTENT_GENERAL_REPROMPT\n\n return response.with_reprompt(reprompt_message)", "def _recommendations_string(self, recs, sess):\n \n rec_strings = []\n \n for rec in recs:\n rec_val = float(rec.getValue())\n sub = (sess.query(orm.Subreddit)\n .filter_by(id=bmr_statbot.recommender.to_base36(rec.getItemID()))\n .one()).name\n \n rec_strings.append('* /r/{0} ({1:g})'.format(sub, rec_val))\n \n return '\\n'.join(rec_strings)", "def show_recommendation_pool(self, top_n=None):\n i = 0\n if top_n is None:\n top_n = self.number_of_recommendations\n\n for _, rdata in self.recommendation_pool.items():\n print(\"\\n{R.movie_id} - {R.title} - {R.genres}\".format(\n R=rdata['movie_obj']))\n\n if 'title_similarity' in rdata:\n print(\" Title Similarity: {} - ({})\".format(\n rdata['title_similarity'], rdata['movie_obj'].title))\n\n if 'genres_similarity' in rdata:\n print(\" Genres Similarity: {} - ({})\".format(\n rdata['genres_similarity'], rdata['movie_obj'].genres))\n\n if 'tags_similarity' in rdata:\n print(\" Tags Similarity: {} - ({})\".format(\n rdata['tags_similarity'], rdata['tags']))\n\n if 'final_similarity' in rdata:\n print(\" -> Final Similarity: {}\".format(\n rdata['final_similarity']))\n\n i += 1\n if top_n and i >= top_n:\n break", "def get(self):\n args = base_recommendation_parser.parse_args()\n try:\n recommendations = most_popular(args.max)\n except RecommendationsUnavailable:\n return {\"No data was found for requested user\"}, 204\n return {\"recommendations\": recommendations}", "def recommend():\n\tprint(\"Drink Coca-Cola.\")", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def _getRecommendationBar(self, element):\n assert type(element) == str, 'Param element must be a type of str.'\n assert element in ['Recommendation', 'Outlook', 'OpinionOfCEO'], \"Element must be drawn from the list ['Recommendation', 'Outlook', 'OpinionOfCEO'].\"\n return self.recommendationBar[element]", "def analystrecommendation(self):\n marketwatchlink = \"https://www.marketwatch.com/investing/stock/\"+self.ticker+\"/analystestimates\"\n\n soup = self.gethtml(marketwatchlink)\n\n ar = str(soup.find(\"td\",{\"class\" :\"recommendation\"}).text).replace(\" \",\"\").\\\n replace(\"\\n\",\"\")\n\n return ar", "def get_recommendations(soup_recipe):\n recommendations = soup_recipe.find(\"h2\", {\"class\": \"description\"})\n if not recommendations:\n return None\n return recommendations.get_text()", "def recommendation_reason(self) -> str:\n return pulumi.get(self, \"recommendation_reason\")", "def test_query_recommendation_by_recommendation_type(self):\n service.Recommendation(id=0, product_id=PS4, recommended_product_id=CONTROLLER, recommendation_type=\"accessory\").save()\n service.Recommendation(id=0, product_id=PS4, recommended_product_id=PS5, recommendation_type=\"up-sell\").save()\n service.Recommendation(id=0, product_id=PS4, recommended_product_id=MONSTER_HUNTER, recommendation_type=\"cross-sell\").save()\n service.Recommendation(id=0, product_id=PS5, recommended_product_id=MONSTER_HUNTER, recommendation_type=\"cross-sell\").save()\n\n resp = self.app.get('/recommendations?recommendation_type=' + str(\"up-sell\"))\n\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['recommendation_type'], \"up-sell\")", "def test_get_recommendation(self):\n service.Recommendation(0, product_id=PS4, recommended_product_id=CONTROLLER, recommendation_type=\"accessory\").save()\n resp = self.app.get('/recommendations/1')\n\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['product_id'], PS4)\n self.assertEqual(data['recommended_product_id'], CONTROLLER)\n self.assertEqual(data['recommendation_type'], \"accessory\")\n self.assertEqual(data['likes'], 0)", "def get(self):\n args = user_recommendation_parser.parse_args()\n try:\n recommendations = reviews_cf(args.user_id, args.max)\n except RecommendationsUnavailable:\n return {\"No data was found for requested user\"}, 204\n\n return {\"recommendations\": recommendations}", "def get_recommendations(soup_recipe):\n ratings = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingValue\"})[\"content\"]\n ratings_count = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingCount\"})[\"content\"]\n if ratings == 0:\n return None, None\n return ratings, ratings_count", "def make_second_recommendation() -> str:\r\n growth_rate_info = highest_growth_rate()\r\n rate_strings = growth_rate_info[0]\r\n most_demand_string = growth_rate_info[1]\r\n return render_template(\"make_second_recommendation.html\",\r\n display_rates=rate_strings,\r\n most_demand_display=most_demand_string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC command.
def on_exchange_declareok(self, _unused_frame): self._channel_ctrl.queue_declare( '', exclusive=True, auto_delete=True, callback=self.on_queue_declareok )
[ "def on_exchange_declareok(self, unused_frame):\r\n LOGGER.info('Exchange declared')\r\n self.setup_queue(self.QUEUE)", "def declare(self):\r\n self.backend.exchange_declare(exchange=self.exchange,\r\n type=self.exchange_type,\r\n durable=self.durable,\r\n auto_delete=self.auto_delete)", "def _declare_exchange(self):\n print(\"Exchange is declared\")\n self._channel.exchange_declare(exchange=self._exchange,\n type=self.exchange_type)", "def temporary_exchange_declare(self, auto_delete=False, **kwargs):\n exchange_name = \"_test_pika_\" + str(uuid.uuid4())\n self.channel.exchange_declare(\n exchange_name, auto_delete=auto_delete, **kwargs\n )\n # If the server won't clean this up, we need to\n if not auto_delete:\n self.register_cleanup(\n lambda: self.channel.exchange_delete(exchange_name)\n )\n return exchange_name", "def declare_queue(self):\n\n self._channel.queue_declare(queue=self._queue_name, durable=True)\n print(\"Queue declared....\")", "def on_queue_declareok(self, method_frame):\n self.logger.info('binding %s and %s together with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)", "def _declare(self, passive=False):\n arguments = dict(self._arguments)\n if self._expires:\n arguments['x-expires'] = self._expires\n if self._message_ttl:\n arguments['x-message-ttl'] = self._message_ttl\n if self._max_length:\n arguments['x-max-length'] = self._max_length\n if self._dlx:\n arguments['x-dead-letter-exchange'] = self._dlx\n if self._dlr:\n arguments['x-dead-letter-routing-key'] = self._dlr\n return specification.Queue.Declare(queue=self.name,\n durable=self._durable,\n passive=passive,\n exclusive=self._exclusive,\n auto_delete=self._auto_delete,\n arguments=arguments)", "def setup_queue(self):\n # print('Declaring queue', self.QUEUE) #debug\n self._result = self._channel.queue_declare(queue=self.QUEUE,\n durable=self.DURABLE,\n arguments=self.ARGUMENTS)\n self._queue = self._result.method.queue", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')", "def exchange_declare(self, exchange, type, durable, auto_delete):\r\n return self.channel.exchange_declare(exchange=exchange,\r\n type=type,\r\n durable=durable,\r\n auto_delete=auto_delete)", "def on_queue_declareok(self, method_frame):\n # LOGGER.info('Binding %s to %s with %s',\n # self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)\n # self._channel.queue_bind(self.on_bindok, self.QUEUE,\n # self.EXCHANGE, self.ROUTING_KEY)\n logger.info(\n \"[{}] Binding to {} with queue {} and routing key \\\"\\\"\".format(self.bot_id, self.exchange,\n self.queue_name))\n\n self._channel.queue_bind(self.on_bindok,\n queue=self.queue_name,\n exchange=self.exchange,\n routing_key=\"\")", "def setup_exchange(self, channel, exchange_name, exchange_type):\n logger.info('Declaring exchange : %s', exchange_name)\n # Note: using functools.partial is not required, it is demonstrating\n # how arbitrary data can be passed to the callback when it is called\n channel.exchange_declare(exchange=exchange_name,\n exchange_type=exchange_type,\n durable = True)", "def setup_exchange(self, exchange_name):\n logger.debug('AMQPSender Declaring exchange %s', exchange_name)\n self._channel.exchange_declare(self.on_exchange_declareok,\n exchange_name,\n self.EXCHANGE_TYPE,\n durable=True)", "def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)", "def on_queue_declared(self, frame):\n self.channel.queue_bind(queue=self.qname, exchange=self.exchange_name, routing_key=self.qname,\n callback=self.on_queue_bind)\n print \"Binding queue %s to exchange %s\" % (self.qname, self.exchange_name)", "def acknowledge(self):", "def acq_done(self, pvname=None, **kws):\n if kws['value'] == 0:\n self.eventq.put('finish')", "def _declare_resources(self) -> None:\n\n self._channel.queue_declare(\n queue=settings.RMQ_QUEUE_QUOTES,\n durable=True,\n exclusive=False,\n auto_delete=False,\n )", "def setup(self):\n if options.verbose:\n print(\"{} setup\".format(self.name))\n chan = self.connection.channel()\n # setup the queue\n chan.queue_declare(queue=self.name, durable=False,\n exclusive=False, auto_delete=False)\n chan.queue_bind(queue=self.name, exchange=self.exname,\n routing_key=ROUTE_KEY)\n chan.queue_purge(self.name)\n chan.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get next report ID or False if not available
def next_id(self): try: return Report.objects.filter(id__gt=self.id).order_by("id").first().id except Exception: return False
[ "def get_next_report_id(self):\n max_report_id = 0\n for report in self.bug_reports:\n if report.report_id > max_report_id:\n max_report_id = report.report_id\n return max_report_id+1", "def nextId(self):\r\n \r\n nextId = -1\r\n if self._wizard.currentId() == SOURCE_PAGE_ID:\r\n nextId = TARGET_PAGE_ID\r\n elif self._wizard.currentId() == TARGET_PAGE_ID:\r\n nextId = DATASTORE_PAGE_ID\r\n elif self._wizard.currentId() == DATASTORE_PAGE_ID:\r\n nextId = PROPERTY_PAGE_ID\r\n return nextId", "def get_report_id(self, reports):\n matching_reports = [\n report for report in reports if report.get('title') in [\n self.api_report_id,\n self.api_test_report_id\n ]\n ]\n\n if self.electiondate: # Can also use the explicit 'if is not none'.\n matching_reports = [\n report for report in matching_reports\n if report.get('electionDate') == self.electiondate\n ]\n\n if matching_reports:\n id = matching_reports[0].get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def next_record_identifier(self) -> str:\r\n return self.data.get('NextRecordIdentifier')", "def _poll_for_report(mws_conn, report_request_id):\n retry_count = 0\n while True:\n retry_count += 1\n try:\n response = mws_conn.get_report_request_list(requestids=[request_id])\n break\n except MWSError as exc:\n print('Error requesting report request list.\\n\\n{0}'.format(exc))\n if retry_count == 3:\n time.sleep(_SLEEP_TIME)\n continue\n else:\n sys.exit()\n\n return response.parsed['ReportRequestInfo']['GeneratedReportId']['value']", "def _get_financial_report(self):\n self.ensure_one()\n\n line = self\n while line.parent_id or line.financial_report_id:\n if line.financial_report_id:\n return line.financial_report_id\n line = line.parent_id\n\n return False", "def previous_id(self):\n try:\n return Report.objects.filter(id__lt=self.id).order_by(\"-id\").first().id\n except Exception:\n return False", "def report_id(self):\n return self.data[0]", "def get_report_id(self, reports, key):\n\n for report in reports:\n if (\n key == 'delSum' and\n report.get('title') == 'Delegates / delsum'\n ) or (\n key == 'delSuper' and\n report.get('title') == 'Delegates / delsuper'\n ):\n id = report.get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def latest_report_id(self) -> str:\n return pulumi.get(self, \"latest_report_id\")", "def _GetNextId(self):\n ret = self.next_id\n self.next_id += 1\n return str(self.next_id)", "def get_next_free_id(self) -> int:\n id_ = 1\n while self.get_employee(id_):\n id_ += 1\n return id_", "def get_next_account_id():\n conn = get_connect()\n cursor = conn.execute(\"SELECT accountId FROM account WHERE isSearched = 0 LIMIT 1\")\n result_list = cursor.fetchone()\n conn.close()\n if result_list is None:\n print(\"no more accountId to be searched\")\n return None\n else:\n account_id = result_list[0]\n return account_id", "def _update_and_get_next_track_id(self):\n self._next_track_id += 1\n return self._next_track_id", "def get_next_id():\n global next_id\n with next_id_lock:\n the_id = next_id\n next_id += 1\n return the_id", "def get_next_submission(self):\r\n success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})\r\n self.assertEqual(success, True)", "def next_id(self):\n return self.max_id + 1", "def reserve_next_run_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('task_history', 'run_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]", "def next_jid(self):\n return self._next_jid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get previous report ID or False if not available
def previous_id(self): try: return Report.objects.filter(id__lt=self.id).order_by("-id").first().id except Exception: return False
[ "def next_id(self):\n try:\n return Report.objects.filter(id__gt=self.id).order_by(\"id\").first().id\n except Exception:\n return False", "def get_report_id(self, reports):\n matching_reports = [\n report for report in reports if report.get('title') in [\n self.api_report_id,\n self.api_test_report_id\n ]\n ]\n\n if self.electiondate: # Can also use the explicit 'if is not none'.\n matching_reports = [\n report for report in matching_reports\n if report.get('electionDate') == self.electiondate\n ]\n\n if matching_reports:\n id = matching_reports[0].get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def report_id(self):\n return self.data[0]", "def latest_report_id(self) -> str:\n return pulumi.get(self, \"latest_report_id\")", "def has_report(self):\n return self.report is not None", "def _get_financial_report(self):\n self.ensure_one()\n\n line = self\n while line.parent_id or line.financial_report_id:\n if line.financial_report_id:\n return line.financial_report_id\n line = line.parent_id\n\n return False", "def get_next_report_id(self):\n max_report_id = 0\n for report in self.bug_reports:\n if report.report_id > max_report_id:\n max_report_id = report.report_id\n return max_report_id+1", "def get_report_id(self, reports, key):\n\n for report in reports:\n if (\n key == 'delSum' and\n report.get('title') == 'Delegates / delsum'\n ) or (\n key == 'delSuper' and\n report.get('title') == 'Delegates / delsuper'\n ):\n id = report.get('id').rsplit('/', 1)[-1]\n return id\n\n return None", "def get_GeneratedReportId(self):\n return self._output.get('GeneratedReportId', None)", "def check_id(self):\n if self.current_check:\n return self.current_check.id\n return None", "def ParentReportNumber(self, default=None):\n return self.data.get('parent_report_number', default)", "def check_report(self, op):\n self._update_reports()\n return self._get_last_report(op)", "def test_get_report(self):\n self.assertEqual(REPORT_ID, get_report(self.database, REPORT_ID)[\"reports\"][0][\"report_uuid\"])", "def _poll_for_report(mws_conn, report_request_id):\n retry_count = 0\n while True:\n retry_count += 1\n try:\n response = mws_conn.get_report_request_list(requestids=[request_id])\n break\n except MWSError as exc:\n print('Error requesting report request list.\\n\\n{0}'.format(exc))\n if retry_count == 3:\n time.sleep(_SLEEP_TIME)\n continue\n else:\n sys.exit()\n\n return response.parsed['ReportRequestInfo']['GeneratedReportId']['value']", "def originality_report_file_id(self):\n return self._originality_report_file_id", "def _previous_id(self, identifier: Identifier) -> Optional['Identifier']:\n previous_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num - 1\n if new_num == 0:\n new_month = new_month - 1\n if new_month == 0:\n new_month = 12\n new_year = new_year - 1\n\n if identifier.is_old_id:\n if new_num == 0:\n new_num = 999\n previous_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n if new_num == 0:\n new_num = 99999\n previous_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n if new_num == 0:\n new_num = 9999\n previous_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=previous_id)\n except IdentifierException:\n return None\n else:\n return None", "def PAID(self):\n if self.session.get('last_bill_result', None) is None:\n return False\n return self.session['last_bill_result'] == \"\"", "def is_specific_myreport_not_present(self, myreport_name):\n is_not_present = True\n self.switch_to_list_frame()\n myreport_name_locator = (By.XPATH, \"//span[text()='%s']\" % myreport_name)\n try:\n self.wait(5).until(EC.presence_of_element_located(myreport_name_locator))\n is_not_present = False\n except:\n raise\n finally:\n self.switch_to_default_content()\n return is_not_present", "def get_ReportRequestId(self):\n return self._output.get('ReportRequestId', None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs HttpRequest from string containing an entire HTTP request
def deserialize(cls, data: bytes) -> HttpRequest: try: raw = data.decode("utf-8") raw_headers, raw_body = raw.split("\r\n\r\n") header_lines = raw_headers.split("\r\n") method, path, protocol = header_lines[0].split() headers = HttpRequest._parse_headers(header_lines[1:]) if "content-length" in headers: body = raw_body.encode("utf-8") else: body = b"" return HttpRequest(method, path, headers, body) except Exception as err: raise exceptions.HttpRequestParsingException( f"Failed to parse {data.decode('utf-8')}" )
[ "def request_from_text(text):\n lines = text.splitlines()\n match = re.search('^([a-z]+) (.*) (http/[0-9]\\.[0-9])$', lines[0], re.I)\n method, path, version = match.groups()\n headers = {}\n for idx, line in enumerate(lines[1:], start=1):\n if not line:\n break\n hdr, val = [item.strip() for item in line.split(':', 1)]\n hdr = hdr.lower()\n vals = headers.setdefault(hdr, [])\n vals.append(val)\n headers = {hdr: ','.join(sorted(vals)) for hdr, vals in headers.items()}\n check_url = urlparse(path)\n if check_url.scheme and check_url.netloc:\n # absolute URL in path\n url = path\n else:\n # otherwise need to try to construct url from path and host header\n url = ''.join(['http://' if 'host' in headers else '',\n headers.get('host', ''),\n path])\n body = '\\n'.join(lines[idx+1:])\n req = requests.Request(method, url, headers=headers, data=body)\n return req.prepare()", "def build_request(path, body='', http_headers=None):\n (unused_scheme, unused_netloc, path, query,\n unused_fragment) = urlparse.urlsplit(path)\n env = {'SERVER_PORT': 42, 'REQUEST_METHOD': 'GET',\n 'SERVER_NAME': 'localhost', 'HTTP_CONTENT_TYPE': 'application/json',\n 'PATH_INFO': path, 'wsgi.input': cStringIO.StringIO(body)}\n if query:\n env['QUERY_STRING'] = query\n\n if http_headers:\n for header, value in http_headers:\n header = 'HTTP_%s' % header.upper().replace('-', '_')\n env[header] = value\n\n cgi_request = api_request.ApiRequest(env)\n return cgi_request", "def decode_request(value):\n request = HttpRequest()\n request.GET = MultiValueDict(value['GET'])\n request.POST = MultiValueDict(value['POST'])\n request.COOKIES = value['COOKIES']\n request.META = value['META']\n request.path = value['path']\n request.method = value['method']\n request.path_info = value['path_info']\n request.response_channel = value['response_channel']\n return request", "def load_request_string(string, format=FORMAT_PEM):\n bio = BIO.MemoryBuffer(string)\n return load_request_bio(bio, format)", "def parse_request(msg):\n start_line, header, body = _parse_message(msg)\n request, path = _parse_request_line(start_line)\n return Request(request, path, header, body)", "def to_httpx_request(cls, **kwargs):\n request = kwargs[\"request\"]\n raw_url = (\n request.url.scheme,\n request.url.host,\n request.url.port,\n request.url.target,\n )\n return httpx.Request(\n request.method,\n parse_url(raw_url),\n headers=request.headers,\n stream=request.stream,\n extensions=request.extensions,\n )", "def parse_request_start_line(line):\r\n try:\r\n method, path, version = line.split(\" \")\r\n except ValueError:\r\n raise HTTPInputException(\"Malformed HTTP request line\")\r\n if not version.startswith(\"HTTP/\"):\r\n raise HTTPInputException(\r\n \"Malformed HTTP version in HTTP Request-Line: %r\" % version)\r\n return RequestStartLine(method, path, version)", "def urllib_req_to_req(urllib_request):\n from ..networking import Request\n from ..utils.networking import HTTPHeaderDict\n return Request(\n urllib_request.get_full_url(), data=urllib_request.data, method=urllib_request.get_method(),\n headers=HTTPHeaderDict(urllib_request.headers, urllib_request.unredirected_hdrs),\n extensions={'timeout': urllib_request.timeout} if hasattr(urllib_request, 'timeout') else None)", "def make_request_message(request):\n url = urlparse(request.url)\n request_headers = dict(request.headers)\n if 'Host' not in request_headers:\n request_headers['Host'] = url.netloc\n return HTTPMessage(\n line='{method} {path} HTTP/1.1'.format(\n method=request.method,\n path=url.path or '/'),\n headers=NEW_LINE.join(str('%s: %s') % (name, value)\n for name, value\n in request_headers.items()),\n body=request._enc_data,\n content_type=request_headers.get('Content-Type')\n )", "def createRequest(test, url, headers=None):\n request = HTTPRequest(url=url)\n if headers: request.headers=headers\n test.record(request, HTTPRequest.getHttpMethodFilter())\n return request", "def __call__(self, requestStr):\n return self.connection.Request(requestStr)", "def parse_http_request(source_addr, http_raw_data):\n r1 = http_raw_data.split('\\n')[0]\n method = r1.split()[0]\n path = r1.split()[1]\n if path == \"/\":\n r2 = http_raw_data.split('\\n')[1]\n host = r2.split()[0]\n if host == \"Host:\":\n host = re.sub(\"[:]\", \"\", host)\n r3 = r2.split(':')\n url = r2.split()[1]\n headers = []\n r3 = ' '.join(r3).replace('\\r', '').split()\n headers.append(r3)\n headers.append(url)\n headers\n requested_host = headers[0:]\n requested_path = path\n portno = re.findall(r'[0-9]+', r2)\n if portno == []:\n portno = \"80\"\n requested_port = portno\n requested_host = url\n print(\"*\" * 50)\n print(\"[parse_http_request] Implement me!\")\n print(\"*\" * 50)\n # Replace this line with the correct values.\n request_info = HttpRequestInfo(source_addr, method, requested_host, requested_port, requested_path, headers)\n return request_info", "def build_request(self):\n # TODO: Switch to requests\n request_data = self.request_data\n\n if six.PY3 and request_data:\n request_data = six.ensure_binary(request_data)\n\n request = six.moves.urllib.request.Request(self.url, data=request_data)\n\n for header_key, header_value in six.iteritems(self._base_headers):\n request.add_header(header_key, header_value)\n\n if self.request_headers:\n for header in self.request_headers:\n request.add_header(header[\"header\"], header[\"value\"])\n\n # seems awkward to override the GET method, but internally it flips\n # between GET and POST anyway based on the existence of request body\n request.get_method = lambda: self.request_method\n return request", "def _constructGetRequest(self, path):\n # Got some help with minor formatting errors from Izzy \n # Worked on the general appraoch with Ivy\n rn = \"\\r\\n\"\n return rn.join([\"GET %s HTTP/1.0\" % path, \"Host: %s\" % self.host, \n \"User-Agent: COMP346Sen\", \"\", \"\"])", "def prepare_request(http_request_name, request_params, timeout=1):\n request_string = DefaultParams.request_base_url + http_request_name\n log.debug('Request string is %s' % request_string)\n response = Request.send_request(request_string, request_params, timeout)\n return response", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = HTTP_1_1,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = b'HTTP/1.1',\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def from_json(json_string: str) -> AnalysisRequest:\n dict_obj = json.loads(json_string)\n\n # make sure the required parameters are present\n required_fields = [\"request_id\"]\n\n for field in required_fields:\n if field not in dict_obj:\n raise Exception(\"JSON string does not represent a DatasetRequest object. Missing \" + field)\n\n # create the object\n request_obj = AnalysisRequest(request_id=dict_obj[\"request_id\"])\n\n return request_obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses headers to a dictionary from a list of strings
def _parse_headers(raw_headers: List[str]) -> Dict[str, str]: headers: Dict[str, str] = {} for header in raw_headers: name = header[: header.find(":")].strip() value = header[header.find(":") + 1 :].strip() headers[name.lower()] = value return headers
[ "def process_headers(self, listed_data):\n\t\treturn { val.rstrip().split(\": \")[0]: val.rstrip().split(\": \")[1] for val in listed_data }", "def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)", "def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = [header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict", "def _parse_headers(self):\n\n headers = self._buf.strip().split('\\r\\n')\n for header in headers:\n (key, value) = header.split(': ')\n value = value.strip()\n key = key.lower()\n if key in self._headers:\n if isinstance(self._headers[key], list):\n self._headers[key].append(value)\n else:\n self._headers[key] = [self._headers[key], value]\n else:\n self._headers[key] = value", "def readHeaders(lines):\n\n result = {}\n for line in lines:\n if line[0] == \"#\":\n continue\n if line.strip() == \"\":\n continue\n key, value = line.split(\":\", 1)\n result[key.strip()] = parseSloppily(value)\n return result", "def samplesheet_headers_to_dict(samplesheet_headers: list) -> dict:\n attr_dict = {}\n for line in samplesheet_headers:\n components = line.split(',')\n _attr = components[0].lower().replace(\" \", \"_\")\n try:\n _val = components[1]\n except IndexError:\n _val = \"\"\n attr_dict[_attr] = _val\n return attr_dict", "def updateheader(self, headerlist=[], http_s_obj=None):\n header = {}\n for headerparam in headerlist:\n key_value = headerparam.split(\":\", 1)\n if len(key_value) == 2:\n try:\n key = key_value[0]\n value = key_value[1].strip()\n header.update({key: value})\n if http_s_obj:\n if http_s_obj.header.get(key):\n http_s_obj.header.update({key: value})\n except Exception:\n continue\n return header", "def _parse_header(lines):\n # The dict into which we will store header fields.\n header = {}\n # Loop over lines in the header.\n for line in lines:\n # Find the first colon.\n index = line.index(COLON)\n # Up to the colon is the field name.\n name = line[: index]\n # After the colon is the field value.\n value = line[index + 1 :]\n # The field value may begin or end with extra space, which is not \n # significant. Remove it.\n value = value.strip()\n # Store the field.\n header[name] = value\n # All done.\n return header", "def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())", "def _parse_header(header):\n parts = header.split() # Splits on one or more spaces\n name_val_pairs = zip(parts[::2], parts[1::2])\n return dict((name.replace(\"=\", \"\"), val) for name, val in name_val_pairs)", "def _parseOutHeaders(self, content):\n headers, body = content.split(b'\\r\\n\\r\\n')\n headers = headers.split(b'\\r\\n')\n requestLine = headers.pop(0)\n return (\n requestLine, dict(header.split(b': ') for header in headers), body)", "def split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)", "def _split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)", "def extract_header(self, string): \n\n header_list = re.findall(r\"\\$\\*(.*)\\*\\$\", string)[0].split(\",\")\n header = {}\n for i in header_list:\n spl = i.split(\":\")\n header[spl[0]] = spl[1]\n\n return header", "def _parse_header(self, line):\n if self._regex_helper.search_compiled(W._re_header, line):\n if not self.headers:\n for value in re.findall(W._re_header, line):\n self.headers.append(value[0])\n raise ParsingDone\n else:\n # Dictionary which is going to be appended to the returned list\n ret = dict()\n # List of entries\n _entries = list()\n # List of values in WHAT entry\n _what_entry = list()\n for value in re.findall(W._re_header, line):\n _entries.append(value[0])\n for what_index in range(len(self.headers) - 1, len(_entries)):\n _what_entry.append(_entries[what_index])\n _what_entry_string = ' '.join(_what_entry)\n for index in range(len(self.headers)):\n if index < len(self.headers) - 1:\n ret.update({self.headers[index]: _entries[index]})\n else:\n ret.update({self.headers[index]: _what_entry_string})\n self.current_ret['RESULT'].append(ret)\n raise ParsingDone", "def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)", "def headers(list_of_dicts):\n headers = list(list_of_dicts[0].keys())\n return headers", "def _convert_list_tuples_to_dict(self, headers_list):\n # type: (List[Tuple[str, str]]) -> Dict[str, str]\n headers_dict = {} # type: Dict\n if headers_list is not None:\n for header_tuple in headers_list:\n key, value = header_tuple[0], header_tuple[1]\n if key in headers_dict:\n headers_dict[key] = \"{}, {}\".format(\n headers_dict[key], value)\n else:\n headers_dict[header_tuple[0]] = value\n return headers_dict", "def parse_spec_header(self, header):\n spec_dict = {}\n #\n core_meta_keys = list(meta.define_core_meta().keys())\n core_meta_keys += ['filename']\n for key in core_meta_keys:\n if key.upper() in header.keys():\n spec_dict[key.upper()] = header[key.upper()]\n # Return\n return spec_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reemplaza la(s) ocurrencia(s) de tag en el archivo por nstr
def rep(self,tag,nstr): tmp = [] for line in self.content: if tag in line: tmp.append(line.replace(tag,nstr)) else: tmp.append(line) self.content = tmp
[ "def addTagToFile(self, fi, tag):", "def getTagsToFile(self, _file):", "def __connectTagsToFile(self, tags, fid):", "def count_tags(filename):\n tags = {}\n for element in get_element(osm_file, verify_tags = False):\n if element.tag not in tags.keys():\n tags[element.tag] = 1\n else:\n tags[element.tag] += 1\n return tags", "def merge_tags():\r\n filenames = glob(os.path.join('data', 'corpus', 'tags', '*Tags.txt'))\r\n with open(os.path.join('data', 'corpus', 'tags', '_iotsec_tags.txt'), 'w', encoding='utf-8') as file:\r\n for filename in filenames:\r\n with open(filename, 'r', encoding='utf-8') as f:\r\n file.write(f.read() + '\\n')", "def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string", "def extract_tags_from_xml(filename):\r\n tree = ET.parse(filename)\r\n root = tree.getroot()\r\n\r\n path, ext = filename.split('.')\r\n if '/' in path:\r\n oldDir1, oldDir2, newFilename = path.split('/')\r\n else:\r\n oldDir1, oldDir2, newFilename = path.split('\\\\')\r\n newFile = open(os.path.join('data', 'corpus', 'tags', newFilename + '.txt'), 'w')\r\n\r\n count = 0\r\n for row in root.findall('row'):\r\n tag_name = row.get('TagName')\r\n newFile.write(tag_name + ' ')\r\n count += 1\r\n newFile.close()", "def descifrarArchivo(self, archivo, n, clave):\n\t\ttry:\n\t\t\tmetadatos = self.obtenerArchivoMetadatos(archivo)\n\t\t\tnombre = metadatos[0]\n\t\t\textension = metadatos[1]\n\t\texcept TypeError as te:\n\t\t\tprint(te , \" descifrarArchivo -- Template\")\n\n\t\t\n\t\tcadenaB64 = self.utilidad.leerArchivo(self.utilidad.dirSalida+nombre+extension+\".CIF\", \"r\")\n\n\t\tcadena = list()\n\t\tcadena.append(cadenaB64)\n\n\t\ttextoClaro = self.modoDescifrar(cadena, 0, clave)\n\t\ttextoClaro = self.utilidad.cadena_a_Base64(textoClaro)\n\t\tself.utilidad.crearArchivo(nombre+extension, textoClaro, \"wb\")", "def updateFile(self, fi):\n\t \t#TODO update backup state!\n\t \tnew_tags = []\t#Tags from file object (ATTENTION! They get filtered later!)\n\t\told_tags = []\t#Tags from database\n\t\tif self.fileInDB(fi):\n\t\t\told_tags = self.getTagsToFile(fi)\n\t\t\tnew_tags = fi.getTags()\n\t\t\tfor row in old_tags:\n\t\t\t\ttry:\n\t\t\t\t\t#remove all occurences of old tags from the new tag array, so only new tags are left\n\t\t\t\t\tnew_tags = filter (lambda a: a != row, new_tags)\n\t\t\t\texcept:\n\t\t\t\t\tprint \"Error while removing element from old_tags\"\n\t\t\tself.cursor.execute(\"SELECT files.fid FROM files WHERE files.filename = ? AND files.path = ?\", (fi.getFileName(), fi.getPath(), ))\n\t\t\tfid = self.cursor.fetchall()[0][0]\n\t\t\tif len(new_tags) > 0:\n\t\t\t\tself.__connectTagsToFile(new_tags, fid)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t\t#Remove old tags from database\n\t\t\tdeprecatedTags = old_tags\n\t\t\tfor row in fi.getTags():\n\t\t\t\ttry:\n\t\t\t\t\t#Remove all tags tags of the file object from the array of tags that are in the database\n\t\t\t\t\t#This leaves us with just the tags that are in the database but that are NOT in the file object\n\t\t\t\t\tdeprecatedTags = filter(lambda a: a != row, deprecatedTags)\n\t\t\t\texcept:\n\t\t\t\t\tprint \"\"\n\t\t\tif len(deprecatedTags) > 0:\n\t\t\t\ttagids = []\n\t\t\t\tfor line in deprecatedTags:\n\t\t\t\t\tself.cursor.execute(\"SELECT tagid FROM tagnames WHERE tagname = ?\", (line, ))\n\t\t\t\t\ttagids.extend(self.cursor.fetchall()[0])\n\t\t\t\tfor line in tagids:\n\t\t\t\t\tself.cursor.execute(\"DELETE FROM file_tag_relations WHERE fk_tagid = ? AND fk_fid = ?\", (line, fid, ))\n\t\t\t\tself.connection.commit()\n\t\t\t\tself.__cleanupTags()\n\t\telse:\n\t\t\tself.addFile(fi)", "def MEMMTagFile(self):\n raw = self.readFile(self.devFileName)\n outFile = open(self.outFileName, 'w')\n tokenList = []\n className = 'None'\n for line in raw:\n line = line.strip().split()\n if not line:\n print ('tagging sentence: {0}'.format(' '.join([tokenList[i][0] for i in xrange(0, len(tokenList))])))\n t = time.time()\n self.MEMMTagSentence(tokenList, className, outFile)\n print('time to tag in minutes: ', (time.time() - t) / 60.0)\n tokenList = []\n outFile.write('\\n')\n className = 'None'\n else:\n # guarantee that each token in tokenList has 7 items exactly.\n if len(line) == 5:\n line.append('None')\n if len(line) == 6:\n line.append('None')\n if 7 > len(line) > 0:\n raise Exception(\"Something went wrong... line: {0}\".format(line))\n if line[5] == 'PRED':\n className = line[6]\n tokenList.append(tuple(line))\n if len(tokenList) > 0:\n print ('tagging sentence: {0}'.format(' '.join([tokenList[i][0] for i in xrange(0, len(tokenList))])))\n # get the className\n className = [line[6] for line in tokenList if line[5] == 'PRED']\n if not className or className[0] == 'None':\n raise Exception(\"Something went wrong at the end of the file. TokenList = {0}\".format(tokenList))\n className = className[0]\n self.MEMMTagSentence(tokenList, className, outFile)\n outFile.close()", "def handleKeepFileTag(tag):\n infoString = tag.getAttribute('info')\n displayColor = tag.getAttribute('displayColor')\n\n keepString = getText(tag.childNodes)\n\n return {'keepFileString':keepString,\n 'infoString':infoString,\n 'displayColor':displayColor}", "def process_file_label(self):\n label_list = []\n for character in self.file_label:\n if character.lower() == 'r':\n label_list.append('read')\n elif character.lower() == 'b':\n label_list.append('barcode')\n self.file_label = label_list\n self.barcode_count = label_list.count('barcode')\n self.read_count = label_list.count('read')", "def writeFile (standard, predict, targetlang):\r\n print(\"writing\")\r\n with open(targetlang+\".tag\", \"w\", encoding = \"utf8\") as f:\r\n for sent in predict.keys():\r\n if sent < 1380214:\r\n for i in range(len(standard[sent])):\r\n word = standard[sent][i][0]\r\n predictTag = predict[sent][i]\r\n f.write(word+\"\\t\"+predictTag+\"\\n\")\r\n f.write(\"\\n\")", "def getFile(self, fi):", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def importXML(docname):#Le Monstrume\n vector3D.clearVectorList()\n localName = \"saves/\"+str(docname)\n actualName = \"\"\n test = localName[-4]+localName[-3]+localName[-2]+localName[-1]\n if test == \".txt\" or test ==\".xml\":\n #localName += test\n actualName=localName\n VecFile = open(str(localName), \"r\")\n else:\n #localName += test\n localName2 = str(localName)\n try:\n localName += \".xml\"\n actualName = localName\n VecFile= open(str(localName), \"r\")\n except NameError:\n try:\n localName2 += \".txt\"\n actualName = localName\n VecFile=open(str(localName2), \"r\")\n except NameError:\n print(\"Es gab leider ein Problem!\")\n actualName=None\n return None\n localLength = sum(1 for line in VecFile)\n VecFile.close()\n VecFile = open(str(actualName), \"r\")\n if localLength%6 != 0:\n print(\"Datei ist nicht richtig Formatiert!\")\n return None\n count=1\n altcount = 1\n while count < (localLength-4):\n actID = 0\n actX = 0\n actY = 0\n actZ = 0\n while altcount < 13:\n actLine = vector3D.extractNumbersFromInput2(VecFile.readline())\n if count == (localLength-2):\n break\n elif altcount < 6:\n pass\n elif altcount == 6:\n actID = int(actLine)\n elif altcount == 7:\n actX = float(actLine)\n elif altcount == 8:\n actY = float(actLine)\n elif altcount == 9:\n actZ = float(actLine)\n elif altcount == 10:\n pass\n elif altcount == 11:\n pass\n altcount+=1\n count+=1\n #print(str(count)+\", \"+str(altcount)) #troubleshooting\n #print(str(actID))#hier auch\n vector3D(actX, actY, actZ, actID)\n altcount=7\n VecFile.close()\n return actualName", "def tag(referencefile):\n dirpath = path.abspath(referencefile)\n\n if path.isdir(dirpath):\n dircontents = listdir(dirpath)\n else:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n\n while not 'tag' in dircontents:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n if len(dircontents) == 0 or path.split(dirpath)[1] == 'chemistry':\n print(\"tag file not found\")\n return None\n\n return path.join(dirpath, 'tag')", "def quran_words_frequences_data(fileName):\n\n # Computing unique words\n unique_words = get_unique_words()\n comma_separated_unique_words = ''\n for word in unique_words:\n comma_separated_unique_words += word + ','\n\n # Removing the extra commas\n comma_separated_unique_words = comma_separated_unique_words.strip(',')\n\n\n\n # * Creating quran_words_frequences_data -- the root tag\n root = Element('quran_words_frequences')\n root.set('unique_words', comma_separated_unique_words)\n\n # * Add root to the tree\n tree = ElementTree(root)\n\n\n for suraNumber in range(1, 114 +1):\n\n sura = quran.get_sura(suraNumber)\n\n # * Creating sura Tag\n suraTag = Element('sura')\n\n # * set number attribute\n suraTag.set('number', str(suraNumber))\n\n # * set sura unique words\n # ??? update get_unique_words\n # suraTag.set('sura_unique_words', suraUniquewords)\n\n ayaCounter = 1\n for aya in sura:\n\n # Create aya Tag\n ayaTag = Element('aya')\n ayaTag.set('number', str(ayaCounter))\n\n # * Computes the words frequency for aya\n ayaWordsDict = get_frequency(aya)\n\n words_comma_separated = ''\n occurrence_comma_separated = ''\n\n for word in ayaWordsDict:\n words_comma_separated += word + ','\n occurrence_comma_separated += str(ayaWordsDict[word]) + ','\n\n # * The same order\n words_comma_separated = words_comma_separated.strip(',')\n occurrence_comma_separated = occurrence_comma_separated.strip(',')\n\n # * Add words & frequencies attributes\n ayaTag.set('unique_words', words_comma_separated)\n ayaTag.set('unique_words_frequencies', occurrence_comma_separated)\n\n\n # * Add aya tag to sura tag\n suraTag.append(ayaTag)\n\n ayaCounter += 1\n\n # * add suraTag to the root\n root.append(suraTag)\n\n\n # print(prettify(root))\n\n file = open(fileName, 'w')\n file.write(prettify(root))\n file.close()", "def mol_file_basename(tag):\n if str(tag)[0] == '0':\n return 'c'+tag+'.'\n else:\n return 'c%06d.' % tag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Salva los cambios al archivo especificado en newfile, o hace una copia del archivo original (filename+'~') y salva el contenido en "filename"
def saveFile(self,newfile=None): if newfile == None: shutil.move(self.filename,self.filename+'~') self.handler = open(self.filename,'w') else: self.handler = open(newfile,'w') self.handler.writelines(self.content) self.handler.close()
[ "def overwrite_original(self) -> None:\n self._write_save_file(self._file_path, \"wb\")", "def write_output_file(updated_file, file_path):\n orig_file = file_path + \".orig\"\n # remove an existion .orig file\n if os.path.isfile(orig_file):\n os.remove(orig_file)\n # rename the current file\n os.rename(file_path, orig_file)\n # write the new file\n with open(file_path, mode='w', encoding='utf-8', newline='') as file_out:\n for line in updated_file:\n file_out.write(line)", "def filename_to_modify(self, filename):", "def encrypt_file(filename, key):\n # init fermet\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, True)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Encrypted: \" + new_filename)\n file.write(encrypted_data)\n\n return new_filename", "def store(self, photo_file, new_path):\n\t\tos.copy(photo_file, os.path.join(self.path, new_path))", "def copy_file_to_archive(self):\n os.rename(\n os.path.join(self._photo._file_path),\n os.path.join(self._used_file_folder, self._photo._file_name),\n )\n LOGGER.debug(\n f\"Copying photo from {self._photo._file_path} \"\n f\"to {self._used_file_folder}\"\n )", "def save_and_backup_file(save_dir, filename, content, mode='w'):\n filename = os.path.join(save_dir, filename)\n if os.path.exists(filename):\n os.rename(filename, filename + '~')\n f = open(filename, mode)\n f.write(content)\n f.close()", "def rewrite(cls, filename_1: str, filename_2: str) -> None:\n shutil.copyfile(filename_1, filename_2)", "def createBackup(self, filename):\n if (not os.path.isfile(filename + '.bak')) and os.path.isfile(filename):\n with open(filename + '.bak', 'wb') as bakf:\n with open(filename, 'rb') as oldf:\n bakf.write(oldf.read())\n print(filename + \" backed up\")", "def clean_file(filename, new_name=None):\n import os\n\n if new_name is None:\n try:\n os.remove(filename)\n except OSError:\n pass\n else:\n try:\n os.rename(filename, new_name)\n except OSError:\n pass", "def update_file(src, dest, chunk_size=1024):\r\n dest_checksum, hashlib_checksum = file_to_hashlist(dest)\r\n instruction = rolling_checksum(src, dest_checksum)\r\n temp_name = \"dest_temp\"\r\n new_fd = os.open(temp_name, os.O_CREAT | os.O_WRONLY)\r\n for data in instruction:\r\n if data in hashlib_checksum.keys():\r\n os.write(new_fd, hashlib_checksum[data])\r\n else:\r\n os.write(new_fd, data)\r\n os.close(new_fd)\r\n os.remove(dest)\r\n os.rename(temp_name, dest)\r\n change_same_permission(src, dest)\r\n change_same_time(src, dest)", "def single_file_write(self, file_pointer, filename):\n temp_file = \"resources/temp_file\"\n\n file_pointer.seek(0)\n with open(temp_file, \"wb\") as output_file:\n shutil.copyfileobj(file_pointer, output_file)\n\n os.rename(temp_file, filename)\n log.info(\"Saved file: %s\", filename)", "def _sudo_write_file(self, filename, contents):\n with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n temp_file.write(contents)\n\n self._sudo(\"mv\", temp_file.name, filename)", "def add_file_to_zipfile(new_zipfile, name, new_name):\n if not new_zipfile or not name or not new_name:\n return\n new_zipfile.write(name, new_name)", "def copy_file(oldpath, newpath, ext=''):\n from shutil import copy as cp\n\n check_file_exists(oldpath + ext)\n\n if os.path.isfile(newpath + ext):\n os.remove(newpath + ext)\n\n cp(oldpath + ext, newpath + ext)", "def renameFile(self, filename, newname):\n f = self.getEntry('File', filename)\n f.filename = newname\n self.session.add(f)\n self.commitDB()", "def replace_file(filename, marker, new_contents):\n assert marker in new_contents\n try:\n with open(filename, 'rb') as f:\n old_contents = f.read()\n if old_contents == new_contents:\n return False\n if marker not in old_contents:\n raise Error('Refusing to overwrite %s' % filename)\n except IOError as e:\n if e.errno == errno.ENOENT:\n pass\n else:\n raise\n mkdir_with_parents(os.path.dirname(filename))\n with open(filename + '.tmp', 'wb') as f:\n f.write(new_contents)\n os.rename(filename + '.tmp', filename)\n return True", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``summary'' is a systemgenerated summary. ``references'' is a list of humanmade reference summaries
def score_summary(self, summary, references, summary_id='A'): try: self._write_config(references, Doc(summary_id, summary)) output = self._run_rouge() output = output.decode("utf-8") return self._parse_output(output) except CalledProcessError as e: print("Rouge returned a non-zero error code. Output was: ", file=sys.stderr) print("BEGIN OUTPUT ", file=sys.stderr) print(e.output, file=sys.stderr) print("END OUTPUT", file=sys.stderr) raise e finally: self._cleanup()
[ "def summary(self, summary):\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def print_summary_and_genomes(summary, genome):\n for sample in summary:\n for ref in summary[sample]:\n if ref == \"metadata\":\n continue\n final = {\n \"sample_identifier\": sample,\n \"reference_organism\": ref,\n \"metadata\": summary[sample][\"metadata\"]\n }\n if genome.get(sample) and genome[sample].get(ref):\n final.update({\"status\": \"complete\"})\n # Add summary statistics\n final.update(summary[sample][ref])\n # Add genomic sequences\n final.update(genome[sample][ref])\n else:\n final.update({\"status\": \"notMapped\"})\n print(json.dumps(final))", "def AddSummary(self, other_summary):\n for feature in self._summary:\n self._summary[feature]['total'] += other_summary.Get()[feature]['total']\n self._summary[feature]['covered'] += other_summary.Get()[feature][\n 'covered']", "def display_summary(loan, reference_date=None):\n\n reference_date = reference_date or loan.start_date\n\n dates = [reference_date] + loan.return_dates\n\n lines = list(\n zip(\n dates,\n map(lambda d: (d - reference_date).days,\n dates),\n loan.balance,\n [''] + loan.amortizations,\n [''] + loan.interest_payments,\n [''] + loan.due_payments,\n )\n )\n\n separator = ('+------------+----------+--------------'\n '+--------------+--------------+--------------+')\n\n header = ('| dates | days | balance '\n '| amortization | interest | payment |')\n\n trailing_line = (\n '| {:>8} | {:>8d} | {:>12.2f} | '\n '| | |'\n .format(*[lines[0][0].isoformat()] + list(lines[0][1:3]))\n )\n\n body_line = ('| {:>8} | {:>8d} | {:>12.2f} '\n '| {:>12.2f} | {:>12.2f} | {:>12.2f} |')\n\n footer_line = (\n '| | | '\n '| {:>12.2f} | {:>12.2f} | {:>12.2f} |'\n .format(\n loan.total_amortization,\n loan.total_interest,\n loan.total_paid\n )\n )\n\n summary = '\\n'.join(\n [\n separator,\n header,\n separator,\n trailing_line,\n ] + [\n body_line.format(\n line[0].isoformat(),\n line[1],\n *list(\n map(\n lambda n: Decimal(n).quantize(\n Decimal('0.01'),\n rounding=ROUND_HALF_UP\n ),\n line[2:])\n )\n )\n for line in lines[1:]\n ] + [\n separator,\n footer_line,\n separator,\n ]\n )\n\n print(summary)", "def orders_summary(self, orders_summary):\n\n self._orders_summary = orders_summary", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def _add_summary(self, deets, history):\n deets['summary'] = {\n trt: [self._summarizer(amt, trt) for amt in amts]\n for trt, amts in history.items()}\n deets['summary description'] = self._summary_description\n deets['caucus'] = self._caucus_amount(history)", "def retrieve_refseq_summary():\n\n if not os.path.exists(\"assembly_summary_refseq.txt\"):\n subprocess.call('wget ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/assembly_summary_refseq.txt', shell=True)\n\n refseq = pd.read_csv(\"assembly_summary_refseq.txt\", sep='\\t', header=1, index_col=False)\n\n refseq = refseq[['# assembly_accession', 'organism_name', 'taxid', 'species_taxid', 'ftp_path']]\n\n\n return refseq", "def summary(id_reference):\n\n LOGGER.info(\"Fetching remote summary for %s\", id_reference)\n data = asyncio.run(get_data(id_reference))\n if data.get(\"hitCount\", 0) == 0:\n raise UnknownReference(id_reference)\n\n if data[\"hitCount\"] == 1:\n if not data[\"resultList\"][\"result\"][0]:\n raise UnknownReference(id_reference)\n\n return data[\"resultList\"][\"result\"][0]\n\n # TODO: Implement proper usage of pagination.\n possible = []\n for result in data[\"resultList\"][\"result\"]:\n if not result:\n continue\n\n external_id = str(result[id_reference.namespace.name])\n if external_id == id_reference.external_id:\n possible.append(result)\n\n if len(possible) == 1:\n return possible[0]\n\n raise TooManyPublications(id_reference)", "def summaries(self, summaries):\n if summaries is None:\n raise ValueError(\"Invalid value for `summaries`, must not be `None`\")\n\n self._summaries = summaries", "def _summary(obj):\n return obj.summary", "def _print_summary(self):\n pass", "def summary_lines(self, summary_lines):\n\n self._summary_lines = summary_lines", "def test_get_brief_summary_output(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected results \n self.assertEqual(resource.get_brief_summary(), \"Delillo's White \"\\\n \"Noise follows narrator Jack Gladney, a professor \"\\\n \"at a \\nsmall Liberal Arts college and describes an \"\\\n \"academic year. Jack teaches \\nat ...\")", "def _add_summary(self, deets, amounts):\n summary = {\n trt: self._summarizer(amt, trt) for trt, amt in amounts.items()}\n deets['summary'] = summary\n deets['summary description'] = self._summary_description\n deets['caucus'] = summary", "def printSummary(self):\n pass", "def references(self, references):\n\n self._references = references", "def _parse_summary_human(self, doc):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display unpublished Draft Entries
def drafts(): query = Entry.drafts().order_by(Entry.last_mod_date.desc()) return object_list('index.html', query)
[ "def drafts():\n pass", "def published(self):\n return self.get_query_set().filter(\n status__in=BloggingSettings.PUBLISHED_ENTRY_STATES\n ).exclude(\n start_date__gt=datetime.now()\n )", "def draft(self):\n action_unpublished.send(\n sender=self.__class__,\n message=self,\n using='default'\n )", "def drafts(request):\n filtered_message_objects = Message.objects.filter(sender=request.user, sent=False)\n for draft in filtered_message_objects:\n print(draft)\n return render(request, 'messenger/drafts.html', {'filtered_message_objects': filtered_message_objects})", "async def draftlist(self, ctx):\n\t\tif not Path(\"data/drafting/draftlist.p\").is_file():\n\t\t\twith open(\"data/drafting/draftlist.p\", \"wb\") as f:\n\t\t\t\tl = [[\"listbeginning\", \"nothing to see here\", \"nope nothin\"]]\n\t\t\t\tpickle.dump(l, f)\n\t\twith open(\"data/drafting/draftlist.p\", \"rb\") as f:\n\t\t\tl = pickle.load(f)\n\t\temb = discord.Embed(title=\"Draft list\", description=\"The 10 most recent drafts\", color=0x3498db)\n\t\temb.set_thumbnail(url=ctx.message.server.icon_url)\n\t\temb.set_footer(text=\"Use !showdraft <id> for more information on a single draft\")\n\t\tif len(l) >= 10:\n\t\t\tr = 10\n\t\telse:\n\t\t\tr = len(l)\n\t\tfor n in range(r, 1, -1):\n\t\t\temb.add_field(name=str(r-n+1)+\"- \"+l[n-1][0]+\" | \"+l[n-1][2], value=\"ID: \"+l[n-1][1], inline=False)\n\t\tawait self.bot.say(embed=emb)\n\t\tprint(\"## showed draftlist\")", "async def showdraft(self, ctx, draftid=\"\"):\n\t\tif Path(\"drafts/\"+draftid+\".p\").is_file():\n\t\t\twith open(\"drafts/\"+draftid+\".p\", \"rb\") as f:\n\t\t\t\tdraftobj = pickle.load(f)\n\t\t\temb = discord.Embed(title=\"Draft information\", description=\"Draft ID: \"+draftid, color=0x3498db)\n\t\t\temb.set_thumbnail(url=ctx.message.server.icon_url)\n\t\t\temb.add_field(name=\"Name\", value=draftobj.name)\n\t\t\temb.add_field(name=\"Size\", value=str(draftobj.size))\n\t\t\temb.add_field(name=\"Date\", value=draftobj.date)\n\t\t\thost = ctx.message.server.get_member(draftobj.host)\n\t\t\tif host != None:\n\t\t\t\temb.add_field(name=\"Host\", value=host.mention)\n\t\t\telse:\n\t\t\t\temb.add_field(name=\"Host\", value=\"Unknown\")\n\t\t\tif draftobj.draftpage != \"\":\n\t\t\t\temb.add_field(name=\"Draftpage\", value=draftobj.draftpage)\n\t\t\temb.add_field(name=\"Status\", value=draftobj.status)\n\t\t\teligibleRole = discord.utils.get(ctx.message.server.roles, id=draftobj.eligible)\n\t\t\tif eligibleRole != None:\n\t\t\t\temb.add_field(name=\"Eligible\", value=eligibleRole.name)\n\t\t\telse:\n\t\t\t\temb.add_field(name=\"Eligible\", value=\"Unknown\")\n\n\t\t\tif draftobj.results == None:\n\t\t\t\temb.add_field(name=\"Results\", value=\"Nothing entered yet\")\t\n\t\t\telse:\n\t\t\t\tresults = \"*__Player__ __W/L__*\\n\"\n\t\t\t\tfor key in draftobj.results:\n\t\t\t\t\tname = key\n\t\t\t\t\tif name.startswith(\"_\"):\n\t\t\t\t\t\tname = name[1:]\n\t\t\t\t\tresults = results + name +\"\\n \"+ str(draftobj.results[key][0]) +\"/\"+ str(draftobj.results[key][1])+\"\\n\"\n\t\t\t\temb.add_field(name=\"Results\", value=results, inline=False)\n\t\t\tawait self.bot.say(embed=emb)\n\n\t\t\tif draftobj.decks != None:\n\t\t\t\tdeckEmb = discord.Embed(title=\"Decks\", description=\"Decks of '\"+draftobj.name+\"'\", color=0x3498db)\n\t\t\t\tfor d in draftobj.decks:\n\t\t\t\t\tdeckEmb.add_field(name=d[\"name\"], value=d[\"value\"], inline=False)\n\t\t\t\tawait self.bot.say(embed=deckEmb)\n\t\t\t\n\t\t\tprint(\"## showed draftinfos of \"+ draftid)\n\t\t\t\n\t\telse:\n\t\t\tawait self.bot.say(\"That draft does not exists\", delete_after=autodeletetime)", "def get_drafts(self, **kwargs):\n default_kwargs = { \"order\": \"updated_at desc\" }\n default_kwargs.update(kwargs)\n return self.get_messages(statuses=[\"draft\"], **default_kwargs)", "def draft(page):\r\n return app_index(page, cached_apps.get_draft, 'draft',\r\n False, True)", "def blog_draft_detail(request, post_id):\n if not request.user.is_authenticated():\n # pretend nothing's here, raise a 404\n raise Http404\n else:\n # test for object's existence\n entry = get_object_or_404(Entry, pk=post_id)\n if request.user.has_perm('blog.view_draft_entry') or (\n request.user.has_perm('blog.view_own_draft') and request.user ==\n entry.author.user):\n qs = Entry.objects.all()\n c = {'draft': True}\n if not entry.is_published:\n return object_detail(request, queryset=qs, object_id=post_id, extra_context=c)\n else:\n return HttpResponseRedirect(entry.get_absolute_url())\n else:\n raise Http404", "def render_archives():\n\n\tq = \"SELECT title, text, id, project FROM entries WHERE archived=1 ORDER BY id desc\"\n\tcur = g.db.execute(q)\n\trows = cur.fetchall()\n\tentries = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in rows]\n\n\t\"\"\" filter catagories as to not repeat \"\"\"\n\tfiltered_catagories = set([ x[3] for x in rows ])\n\n\treturn render_template('show_entries.html', \n\t\tentries=entries, \n\t\tcatagories=filtered_catagories,\n\t\tfiltered=False,\n\t\tarchived=True,\n\t\t)", "def list_drafts(self, account):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, None,\n \"conveyor.list_drafts\",\n [account['name']])", "def list_drafts(user):\n identity = get_identity_for_user(user)\n service = get_record_service()\n recids = [\n dm.json[\"id\"]\n for dm in service.draft_cls.model_cls.query.all()\n if dm is not None and dm.json is not None\n ]\n\n for recid in recids:\n try:\n draft = service.read_draft(id_=recid, identity=identity)\n click.secho(\n \"{} - {}\".format(draft.id, draft.data[\"metadata\"][\"title\"]), fg=\"green\"\n )\n except:\n pass", "def drafts(self):\n\n return self.filter(status='draft')", "def entries_published(self):\n return entries_published(self.entries)", "def get_draft_revisions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n return Revision.objects.filter(\n version__object_id=object_id, \n version__content_type=content_type,\n easypublishermetadata__status='draft',\n easypublishermetadata__language=get_language()\n ).select_related().distinct()", "def draft_message(request):\n query = models.Message.query(\n models.Message.sender == request.user.email(),\n models.Message.draft == True,\n ancestor=request.issue.key)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return HttpTextResponse(draft_message.text if draft_message else '')\n return HttpTextResponse('An error occurred.', status=500)", "def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])", "def hide_if_unpublished(self):\n if self.visibility == Visibility.UNPUBLISHED:\n self.visibility = Visibility.HIDDEN\n self.save()", "def items():\n return Post.objects.filter(status=\"publish\").order_by(\"-pub_date\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create new blog Entry
def create(): if request.method == 'POST': if request.form.get('title') and request.form.get('content'): entry = Entry.create( title = request.form.get('title'), content = request.form.get('content'), published = request.form.get('published') or False) flash('Entry created successfully!', 'success') if entry.published: return redirect(url_for('detail', slug=entry.slug)) else: return redirect(url_for('edit', slug=entry.slug)) else: flash('Title and Content are required!', 'danger') return render_template('create.html')
[ "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def create(self, request):\n attrs = self.flatten_dict(request.POST)\n\n if self.exists(**attrs):\n return rc.DUPLICATE_ENTRY\n else:\n post = Blogpost(title=attrs['title'], \n content=attrs['content'],\n author=request.user)\n post.save()\n \n return post", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n title = request.form['title']\n body = request.form['text']\n timestamp = datetime.now()\n nickname = session['nickname']\n post = Post(title=title, body=body, timestamp=timestamp, nickname=nickname)\n dbSQL.session.add(post)\n dbSQL.session.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('blog'))", "def blog_create(request):\n entry = BlogRecord()\n form = BlogCreateForm(request.POST)\n if request.method == 'POST' and form.validate():\n form.populate_obj(entry)\n request.dbsession.add(entry)\n return HTTPFound(location=request.route_url('home'))\n return {'form': form, 'action': request.matchdict.get('action')}", "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def new(title):\n blog = models.Blog(os.getcwd())\n try:\n blog.new_post(title)\n except ValueError as ve:\n click.ClickException(str(ve))", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def addBlogEntry(self, space, title, content = ''):\n return BlogEntry.create(self.pm_getSpaceManager().addBlogEntry(self._unbox(space), title, content), self._modelDataManager)", "def new_entry():\r\n form = forms.NewEntryForm()\r\n if form.validate_on_submit():\r\n entry = models.Journal.create(\r\n title = form.title.data,\r\n date = datetime.datetime.strptime(form.date.data, \r\n '%d/%m/%Y'),\r\n time_spent = form.time_spent.data,\r\n learnt = form.learnt.data.strip(),\r\n resources = form.resources.data.strip(),\r\n user_id = current_user.user_id,\r\n tags_str = form.tags.data\r\n )\r\n tag_list = form.tags.data.split(\", \")\r\n for tag in tag_list:\r\n models.Tags.create(\r\n tag = tag,\r\n entry = entry\r\n )\r\n flash(\"Entry added.\", \"success\")\r\n return redirect(url_for('entries'))\r\n return render_template('new.html', form=form)", "def post(self):\n data = request.get_json()\n return business.create_blog(data['title'], data['context'], data['user_id'], data['tags'])", "def ask_create_blog():\n\n title = input(\"Enter your blog title:\\n\")\n author = input(\"Enter your name:\\n\")\n blogs[title] = Blog(title, author)", "def createEditBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createEditEntryDocument()\r\n self._initEditEntryDocument(atomdoc)\r\n return ZAtomEditBlogEntry(atomdoc)", "def create_post():\r\n try:\r\n comment = form[\"comment\"].value\r\n except:\r\n comment = \"I didn't enter a comment :(\"\r\n\r\n try:\r\n name = form[\"name\"].value\r\n except:\r\n print(\"Content-type: text/html\")\r\n print()\r\n print(\"You need to at least submit a name. \\\r\n Please go back and try again!\")\r\n raise SystemExit\r\n\r\n try:\r\n email = form[\"email\"].value\r\n except:\r\n email = None\r\n\r\n try:\r\n website = form[\"website\"].value\r\n except:\r\n website = None\r\n\r\n post = Post.create(\r\n comment=comment,\r\n name=form[\"name\"].value,\r\n email=email,\r\n website=website,\r\n date=datetime.now().strftime(\"%H:%M - %d/%m/%y\")\r\n )", "def post(self):\n if not self.user:\n self.redirect('../signin')\n return\n\n # get post parameters\n sub = self.request.get('subject')\n body = self.request.get('blog-text')\n\n sub_error = ''\n body_error = ''\n\n # set error is subject is empty\n if not sub:\n sub_error = 'Please enter a subject'\n\n # set error if body is empty\n if not body:\n body_error = 'Please enter some text'\n\n # if error message is set, render page with errors\n if body_error or sub_error:\n self.render('newpost.html',\n user=self.user,\n subject=sub,\n body_text=body,\n subject_error=sub_error,\n body_error=body_error)\n return\n\n newpost = BlogPost(subject=sub,\n blog=body,\n username=self.user.username,\n like_count=0)\n newpost.put()\n BlogPost.flush_cache()\n self.redirect('/blog/%s' % newpost.key().id())", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def test_create_view_post_creates_new_entry_with_given_info(dummy_request):\n from pyramid_learning_journal.views.default import create_view\n from pyramid_learning_journal.models import Entry\n entry_data = {\n 'title': 'fun times',\n 'body': 'all the fun, all the time.'\n }\n dummy_request.method = 'POST'\n dummy_request.POST = entry_data\n create_view(dummy_request)\n entry = dummy_request.dbsession.query(Entry).get(1)\n assert entry.title == entry_data['title']\n assert entry.body == entry_data['body']", "def post(self, entry):\n\n # get text with readability\n post = WordPressPost()\n post.title = entry.title\n post.content = \"Source: <a href='{}'>{}</a><hr\\>{}\".format(\n entry.link,\n urlparse(entry.link).netloc,\n self.get_content(entry.link))\n post.terms_names = {'post_tag': entry.keywords,\n 'category': [\"AutomatedPost\"]}\n post.post_status = 'publish'\n self.client.call(NewPost(post))", "def new_blog(blog, template):\n path = '/'.join([POSTS, blog])\n with open(path, 'w') as blg:\n blg.write(template)", "def new_post(tag, entries, site, dry_run=False):\n\n content_format = get_default_compiler(\n True, site.config['COMPILERS'], site.config['post_pages']\n )\n title = '%s [%s]' % (\n tag.capitalize(), datetime.datetime.now().strftime('%Y-%m-%d')\n )\n\n def write_content(_, **kwargs):\n post_file = kwargs['path']\n with codecs.open(post_file, encoding='utf8') as f:\n text = [\n line.strip() for line in f.readlines()\n if not line.startswith('Write your post')\n ]\n text += [ENTRY_FORMAT % entry for entry in entries]\n\n with codecs.open(post_file, 'w', encoding='utf8') as f:\n f.write('\\n'.join(text))\n\n if dry_run:\n [print(entry['title']) for entry in entries]\n\n else:\n signal('new_post').connect(write_content)\n\n site.config['ADDITIONAL_METADATA']['category'] = tag\n site.commands.new_post(**{\n 'title': title,\n 'tags': tag,\n 'onefile': True,\n 'twofile': False,\n 'content_format': content_format,\n 'schedule': None,\n })\n\n success(tag)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a function converting csv output files from operational_sep_quantities to json files for observations
def obs_csv2json(input_file,output_file,example_path,instrument): obs_path = Path(cfg.obs_path) with open(example_path,'r') as e: example = js.load(e) #deleting unused categories del(example['sep_forecast_submission']['forecasts']) del(example['sep_forecast_submission']['triggers'][2]) del(example['sep_forecast_submission']['triggers'][1]) del(example['sep_forecast_submission']['triggers'][0]) del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument']) del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time']) del(example['sep_forecast_submission']['contacts']) del(example['sep_forecast_submission']['model']) del(example['sep_forecast_submission']['issue_time']) example['sep_forecast_submission']['mode'] = 'observation' #json template for observations obs_json = example fieldnames = ('energy_threshold','flux_threshold','start_time','intensity', 'peak_time','rise_time','end_time','duration','fluence>10', 'fluence>100') #extracting data from csv file with open(input_file,'r') as f: reader = csv.DictReader(f, fieldnames) out = js.dumps( [ row for row in reader ] ) obs_data = js.loads(out) data={} (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['observatory']) = instrument #creating data for all energy levels forecast for j in range(1,len(obs_data)): data[j-1]=obs_data[j] #recording start and end times for all events for i in range(len(data)): data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S') data[i]['start_time'] = data[i]['start_time'].isoformat() data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S') data[i]['end_time'] = data[i]['end_time'].isoformat() data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S') data[i]['peak_time'] = data[i]['peak_time'].isoformat() #recording observed values for all events if i > 0: (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events']).append({}) event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events'][i]) #start and end times event['start_time']=data[i]['start_time'] event['threshold'] = data[i]['flux_threshold'] event['energy_min'] = float(data[i]['energy_threshold'][1:]) event['energy_max'] = -1 event['end_time']=data[i]['end_time'] #peak values event['peak_intensity']=data[i]['intensity'] event['peak_time'] = data[i]['peak_time'] event['intensity_units']='pfu' #fluence values event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value', 'units' : 'MeV [cm^-2]'}, {'energy_min' : '100', 'fluence_value' : 'fluence_value', 'units' : 'MeV [cm^-2]'}] event['fluence'][0]['fluence']=data[i]['fluence>10'] event['fluence'][1]['fluence']=data[i]['fluence>100'] if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index (int(event['energy_min']))]: event['all_clear_boolean'] = 'false' else: event['all_clear_boolean'] = 'true' #building json file with open(obs_path / output_file, 'w') as s: js.dump(obs_json,s,indent=1) print('json file %s created' %output_file) return
[ "def parse_csv_data_to_json(input_file, output_file):\n with open(input_file) as f:\n # open the output file for writing\n with open(output_file, 'w') as myfile:\n\n # read in the csv\n input_content = csv.reader(f, delimiter=',')\n\n # skip the header and store it to be used with the json objects\n field_names = next(f).strip().split(\",\")\n number_of_records_written = 0\n for x in input_content:\n # make a dictionary of keys and values for json dumping\n dictionary = dict(zip(field_names, x))\n\n # delete an fields that are empty string to suppress errors while uploading\n cleaned_dict = {k: v for k, v in dictionary.items() if v is not \"\"}\n\n # set the id of the index to the ack id\n action_and_meta_data[\"index\"][\"_id\"] = cleaned_dict.get(\"ACK_ID\")\n\n # dump the index and data to file\n json.dump(action_and_meta_data, myfile)\n myfile.write('\\n')\n json.dump(cleaned_dict, myfile)\n myfile.write('\\n')\n number_of_records_written += 1\n\n return number_of_records_written", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def metrics_to_json(metrics_csv_int, region, coords, metrics_filename):\n data = {}\n with open(metrics_csv_int,'r') as f:\n reader = csv.reader(f)\n fields = next(reader)\n for row in reader:\n data[row[0]] = {\"Temporal intermittency\": {},\n \"Spatial intermittency\": {}}\n # skip the first key in fields, clean up field name\n for i,field in enumerate(fields[1:6]):\n data[row[0]][\"Temporal intermittency\"].update({field[5:]:float(row[i+1])})\n for i,field in enumerate(fields[6:]):\n data[row[0]][\"Spatial intermittency\"].update({field[3:]:float(row[i+6])})\n with open(metrics_filename, 'r') as fname:\n metrics = json.load(fname)\n\n # Add region to dimensions information\n metrics['DIMENSIONS']['dimensions']['region'].update({region: coords})\n\n # Update model statistics\n for model in data:\n if not (model in metrics['RESULTS']):\n metrics['RESULTS'][model] = {}\n metrics['DIMENSIONS']['dimensions']['dataset'].update({model: {}})\n metrics['RESULTS'][model][region] = data[model]\n\n # Write new metrics to same file\n with open(metrics_filename, 'w') as fname:\n json.dump(metrics,fname,indent = 2)", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def convert_data():\n # Hard coded input and output filename\n filename_in = 'data.csv'\n filename_out = os.path.join('..', 'mongo-seed', 'data.json')\n\n # Read the data from disk\n data = pd.read_csv(filename_in)\n\n # convert to a list of dictionaries\n chemicals = [_row_to_chemical(row) for _, row in data.iterrows()]\n\n with open(filename_out, 'w') as f:\n json.dump(chemicals, f)", "def _export_jql_items(items, output_file, format='json', compress=False):\n if format == 'json':\n Mixpanel.export_data(items, output_file, format=format, compress=compress)\n elif format == 'csv':\n with open(output_file, 'w') as f:\n f.write(items)\n if compress:\n Mixpanel._gzip_file(output_file)\n os.remove(output_file)\n else:\n Mixpanel.LOGGER.warning('Invalid format must be either json or csv, got: ' + format)\n return", "def csv_to_json(archivo, clase, columnas):\n df = pd.read_csv(archivo)\n topics = list(df[clase].unique())\n topics = [x for x in topics if str(x) != 'nan']\n\n for topic in topics:\n df_ = df[df[clase] == topic]\n\n df_ = df_[columnas]\n df_.columns = ['spanish', 'english', 'french', 'chinese', 'englishaudio', 'spanishaudio', 'frenchaudio',\n 'chineseaudio', 'image']\n print(df_.columns)\n df_ = df_.to_json(orient='records')\n data_json = json.loads(df_)\n pprint.pprint(data_json[0])\n data_json = {'items': data_json}\n print(topic)\n\n with io.open('csv_{}.json'.format(topic), 'w', encoding='utf-8') as f:\n f.write(json.dumps(data_json, ensure_ascii=False, indent=4, sort_keys=True))", "def to_csv_json_set(self, csv_file_path, json_file_path, write_mode: str = 'w'):\n self.to_csv(csv_file_path)\n with open(json_file_path, write_mode) as f:\n json.dump(self.metadata_dict, f)", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def df_to_json(complete_dataset, output_path, static_columns):\n megajson = {}\n\n static_columns = [\"continent\", \"location\"] + list(static_columns)\n\n complete_dataset = complete_dataset.dropna(axis=\"rows\", subset=[\"iso_code\"])\n\n for _, row in complete_dataset.iterrows():\n\n row_iso = row[\"iso_code\"]\n row_dict_static = row.drop(\"iso_code\")[static_columns].dropna().to_dict()\n row_dict_dynamic = row.drop(\"iso_code\").drop(static_columns).dropna().to_dict()\n\n if row_iso not in megajson:\n megajson[row_iso] = row_dict_static\n megajson[row_iso][\"data\"] = [row_dict_dynamic]\n else:\n megajson[row_iso][\"data\"].append(row_dict_dynamic)\n\n with open(output_path, \"w\") as file:\n file.write(json.dumps(megajson, indent=4))", "def export_sampleStorage_csv(self, sample_ids_I, filename_O):\n\n data_O = [];\n for sample_id in sample_ids_I:\n data_tmp =[];\n data_tmp = self.get_rows_sampleID_limsSampleStorage(sample_id);\n data_O.extend(data_tmp);\n if data_O:\n io = base_exportData(data_O);\n io.write_dict2csv(filename_O);", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def transform2json(source, target):\n behaviors = pd.read_table(\n source,\n header=None,\n names=['uid', 'time', 'clicked_news', 'impression'])\n f = open(target, \"w\")\n with tqdm(total=len(behaviors), desc=\"Transforming tsv to json\") as pbar:\n for row in behaviors.itertuples(index=False):\n item = {}\n item['uid'] = row.uid[1:]\n item['time'] = row.time\n item['impression'] = {\n x.split('-')[0][1:]: int(x.split('-')[1])\n for x in row.impression.split()\n }\n f.write(json.dumps(item) + '\\n')\n\n pbar.update(1)\n\n f.close()", "def build_csv_write(api):\n\n write_rows = []\n for info in api:\n write_rows.append([info[\"number\"], info[\"status\"], info[\"available_bike_stands\"],\n info[\"available_bikes\"], time])\n\n return write_rows", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def to_json(self):\n\n for column in self.data.columns:\n with open(os.path.join(self.data_dir, column + \".json\"), \"w\") as f:\n json.dump(obj=list(self.data.get(column)),\n fp=f,\n sort_keys=True,\n indent=4)", "def generate_quotes_csv(country='US', currency='USD', originplace='CMH', destinationplace='anywhere', inboundpartialdate='anytime', outboundpartialdate='anytime', locale='en-US', output_file=''):\n\n url = f\"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/browseroutes/v1.0/{country}/{currency}/{locale}/{originplace}/{destinationplace}/{outboundpartialdate}/{inboundpartialdate}\"\n\n headers = {\n 'x-rapidapi-host': \"skyscanner-skyscanner-flight-search-v1.p.rapidapi.com\",\n 'x-rapidapi-key': \"2d32d04dd5msh5cb7c5e39103bebp1cd117jsn0f0dea8441dc\"\n }\n\n response = requests.request(\"GET\", url, headers=headers).json()\n\n places_df = create_df_from_json(response['Places'])\n places_df = places_df.drop(columns=['Type', 'SkyscannerCode', 'CityId'])\n\n carriers_dict = create_carriers_dict(response['Carriers'])\n\n quotes_df = create_df_from_json(response['Quotes'])\n\n quotes_df = quotes_df.merge(places_df, left_on='OutboundLeg.OriginId', right_on='PlaceId', suffixes=('', '_OutboundOrigin'))\n quotes_df = quotes_df.merge(places_df, left_on='OutboundLeg.DestinationId', right_on='PlaceId', suffixes=('', '_OutboundDestination'))\n quotes_df = quotes_df.merge(places_df, left_on='InboundLeg.OriginId', right_on='PlaceId', suffixes=('', '_InboundOrigin'))\n quotes_df = quotes_df.merge(places_df, left_on='InboundLeg.DestinationId', right_on='PlaceId', suffixes=('', '_InboundDestination'))\n\n quotes_df = quotes_df.rename(columns={'Name':'Name_OutboundOrigin', 'Type':'Type_OutboundOrigin',\n 'SkyscannerCode':'SkyscannerCode_OutboundOrigin', 'IataCode':'IataCode_OutboundOrigin', 'CityName':'CityName_OutboundOrigin', \n 'CityId':'CityId_OutboundOrigin', 'CountryName':'CountryName_OutboundOrigin'})\n \n quotes_df['OutboundLeg.Carriers'] = quotes_df['OutboundLeg.CarrierIds'].apply(lambda x: convert_carriers(x, carriers_dict))\n quotes_df['InboundLeg.Carriers'] = quotes_df['InboundLeg.CarrierIds'].apply(lambda x: convert_carriers(x, carriers_dict))\n \n latest_scan_df = quotes_df[['QuoteId', 'MinPrice', 'Direct', 'QuoteDateTime', 'OutboundLeg.Carriers', 'InboundLeg.Carriers',\n 'OutboundLeg.DepartureDate', 'InboundLeg.DepartureDate', 'Name_OutboundOrigin', 'IataCode_OutboundOrigin',\n 'CityName_OutboundOrigin', 'CountryName_OutboundOrigin', 'Name_OutboundDestination', \n 'IataCode_OutboundDestination', 'CityName_OutboundDestination', 'CountryName_OutboundDestination', \n 'IataCode_InboundOrigin', 'IataCode_InboundDestination']]\n\n latest_scan_df['RunDate'] = datetime.now()\n\n output_file = 'output.csv' if output_file == '' else output_file\n latest_scan_df.to_csv(output_file) # to append, pass mode='a', header=False\n print(f\"Wrote outputs to {output_file}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
choose the correct instrument to use for observations for a given date range. inputs must be date objects from the datetime module. used if there is no information about which instrument was primary.
def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS inst_start_dates=[] inst_end_dates=[] good_instruments = [] good_end_dates = [] bad_inst = [] #extracting dates where instruments are active from csv file inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv') for s in inst_dates['start']: inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date()) for e in inst_dates['end']: if str(e) == 'nan': inst_end_dates.append(datetime.today().date()) else: inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date()) #checking which instruments are active during given time period and #choosing the correct ones print('checking which instruments are active for given dates') for i in range(len(inst_start_dates)): if (inst_start_dates[i] < given_start_date) and (given_end_date < inst_end_dates[i]): print('%s works' %inst_dates['Instrument'][i]) good_instruments.append(inst_dates['Instrument'][i]) good_end_dates.append(inst_end_dates[i]) else: print('outside of %s range' %inst_dates['Instrument'][i]) #checking if active instruments actually have data for that date for inst in good_instruments: inst_str = inst.replace('-','').lower() year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/' + inst_str) try: request.urlopen(url) print('%s data available' %inst) except: print('%s data NOT available' %inst) bad_inst.append(inst) #not choosing instrument if it doesn't have data for binst in bad_inst: good_instruments.remove(binst) #if more than one instrument is available, choose which one to use if len(good_instruments) > 1: print('Please choose which instrument you would like to use.') for j in range(len(good_instruments)): print('Type ' + str(j) + ' for ' + str(good_instruments[j])) inst_choice = input('Answer:' ) instrument = good_instruments[int(inst_choice)] end_date = good_end_dates[int(inst_choice)] print('we are using %s as our instrument for observations' %instrument) else: instrument = good_instruments[0] end_date = good_end_dates[0] print('we are using %s as our instrument for observations' %instrument) return([instrument,end_date])
[ "def choose_prime_inst(given_start_date,given_end_date):\r\n\r\n #extracting primary dates where instruments are active from csv file\r\n inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3)\r\n\r\n #figuring out which instrument is primary for given start date\r\n for d in range(len(inst_prime_dates['Start Date'])):\r\n change_date = parse(inst_prime_dates['Start Date'][d])\r\n if given_start_date >= change_date.date():\r\n prime_inst = inst_prime_dates['EPEAD Primary'][d]\r\n backup_inst = inst_prime_dates['EPEAD Secondary'][d]\r\n end_date = parse(inst_prime_dates['Start Date'][d+1]).date()\r\n\r\n #if no prime instrument available, have to choose which instrument\r\n #to use based on which instruments have data for this date\r\n if str(prime_inst) == 'nan':\r\n if str(backup_inst) == 'nan':\r\n print('no information about primary instrument available.'\r\n 'Choosing instrument based on active date ranges')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n else:\r\n prime_inst = backup_inst\r\n\r\n break\r\n\r\n prime_inst = str(prime_inst).split('.')[0]\r\n\r\n #reformatting instrument name\r\n if len(prime_inst) == 2:\r\n inst_str = str(prime_inst)\r\n elif len(prime_inst) == 1:\r\n inst_str = '0' + str(prime_inst)\r\n\r\n print('GOES-%s is the primary instrument for given start time' %inst_str)\r\n\r\n #checking to make sure this primary instrument actually has data\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/goes' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('GOES-%s has data available' %inst_str)\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n except request.HTTPError:\r\n #if primary instrument doesn't have data for this date, using backup instrument\r\n print('GOES-%s does NOT have data available' %inst_str)\r\n\r\n #reformatting backup instrument\r\n if len(str(backup_inst)) == 2:\r\n inst_str = str(backup_inst)\r\n elif len(str(backup_inst)) ==1:\r\n inst_str = '0' + str(backup_inst)\r\n\r\n print('checking for data from backup instrument GOES-%s' %inst_str)\r\n\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/'\r\n + month + '/goes' + inst_str)\r\n\r\n #checking to see if backup instrument has data for this date, if not have\r\n #to manually choose which instrument to use based off which instruments\r\n #have data available\r\n try:\r\n request.urlopen(url)\r\n print('backup instrument data found - using backup instrument')\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations'\r\n %instrument)\r\n\r\n except request.HTTPError:\r\n print('no knowledge of backup or primary instrument - choosing '\r\n 'instrument based on available data')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n\r\n return([instrument,end_date])", "def sample_deformation_on_acq_dates(acq_dates, tc_def, def_dates):\n \n import numpy as np\n \n n_acq = len(acq_dates) # \n tc_def_resampled = np.zeros((n_acq, 1)) # initialise as empty (zeros)\n \n for acq_n, acq_date in enumerate(acq_dates): \n day_arg = def_dates.index(acq_date) # find which day number the acquiisiont day is\n day_def = tc_def[day_arg] # get the deformaiton for that day\n tc_def_resampled[acq_n, 0] = day_def # record\n \n return tc_def_resampled", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def func_checker(*args, **kwargs):\n da = args[0]\n if \"start_date\" not in kwargs or kwargs[\"start_date\"] is None:\n # use string for first year only - .sel() will include all time steps\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n if \"end_date\" not in kwargs or kwargs[\"end_date\"] is None:\n # use string for last year only - .sel() will include all time steps\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n\n if isinstance(kwargs[\"start_date\"], int) or isinstance(kwargs[\"end_date\"], int):\n warnings.warn(\n \"start_date and end_date require dates in (type: str) \"\n 'using formats of \"%Y\", \"%Y-%m\" or \"%Y-%m-%d\".',\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = str(kwargs[\"start_date\"])\n kwargs[\"end_date\"] = str(kwargs[\"end_date\"])\n\n try:\n sel_time = da.time.sel(time=kwargs[\"start_date\"])\n if sel_time.size == 0:\n raise ValueError()\n except KeyError:\n warnings.warn(\n '\"start_date\" not found within input date time range. Defaulting to minimum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n except ValueError:\n warnings.warn(\n '\"start_date\" has been nudged to nearest valid time step in xarray object.',\n UserWarning,\n stacklevel=2,\n )\n nudged = da.time.sel(time=slice(kwargs[\"start_date\"], None)).values[0]\n kwargs[\"start_date\"] = to_isoformat(nudged)\n\n try:\n sel_time = da.time.sel(time=kwargs[\"end_date\"])\n if sel_time.size == 0:\n raise ValueError()\n except KeyError:\n warnings.warn(\n '\"end_date\" not found within input date time range. Defaulting to maximum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n except ValueError:\n warnings.warn(\n '\"end_date\" has been nudged to nearest valid time step in xarray object.',\n UserWarning,\n stacklevel=2,\n )\n nudged = da.time.sel(time=slice(None, kwargs[\"end_date\"])).values[-1]\n kwargs[\"end_date\"] = to_isoformat(nudged)\n\n if (\n da.time.sel(time=kwargs[\"start_date\"]).min()\n > da.time.sel(time=kwargs[\"end_date\"]).max()\n ):\n raise ValueError(\n f'Start date (\"{kwargs[\"start_date\"]}\") is after end date (\"{kwargs[\"end_date\"]}\").'\n )\n\n return func(*args, **kwargs)", "def __rowAtDate__(self, analysis, input, test_date):\n # check if date is at a boundary of, or falls outside, the period of testing\n lower = analysis.__initialAnalysisPeriod__()\n upper = len(input[\"Date\"]) - 1\n \n if input[\"Date\"][lower]>=test_date:\n return lower\n \n elif input[\"Date\"][upper]<=test_date:\n return upper\n \n else: # perform binary search through test dates\n while True:\n middle = (lower + upper)/2\n \n if input[\"Date\"][middle-1]<test_date and input[\"Date\"][middle]>=test_date:\n return middle\n \n elif input[\"Date\"][middle]<test_date:\n lower = middle\n \n else: #input[\"Date\"][middle]>test_date:\n upper = middle", "def price_opt( self,\r\n instrument,\r\n val_date ):\r\n if isinstance( val_date, str ):\r\n for fmt in ( \"%Y-%m-%d\", \"%m/%d/%Y\" ):\r\n try:\r\n val_date = dt.datetime.strptime(val_date, fmt).date()\r\n break\r\n except ValueError:\r\n pass\r\n npv_ans,inst = self.cal_opt_rate( val_date, instrument )\r\n return npv_ans", "def test_date_range(self):\n chip = search.DateRangeChip()\n with self.assertRaises(ValueError):\n chip.start_time = \"20 minutes\"\n\n chip = search.DateRangeChip()\n date_string = \"2020-12-12T12:12:12,2020-12-12T12:12:12\"\n chip.from_dict({\"value\": date_string})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip.chip, expected_chip)\n self.assertEqual(chip.start_time, \"2020-12-12T12:12:12\")\n self.assertEqual(chip.end_time, \"2020-12-12T12:12:12\")\n\n chip_micro = search.DateRangeChip()\n date_string_micro = \"2020-12-12T12:12:12.000Z,2020-12-12T12:12:12.000Z\"\n chip_micro.from_dict({\"value\": date_string_micro})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip_micro.chip, expected_chip)\n self.assertEqual(chip_micro.start_time, \"2020-12-12T12:12:12\")\n self.assertEqual(chip_micro.end_time, \"2020-12-12T12:12:12\")\n\n chip_micro = search.DateRangeChip()\n date_string = \"2020-12-12T12:12:12.001,2020-12-12T12:12:12.002\"\n chip_micro.from_dict({\"value\": date_string})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip_micro.chip, expected_chip)\n self.assertEqual(chip_micro.start_time, \"2020-12-12T12:12:12.001\")\n self.assertEqual(chip_micro.end_time, \"2020-12-12T12:12:12.002\")", "def pd_date_range(*args, **kwargs):\n incl = version.parse(pd.__version__) >= version.parse('1.4.0')\n\n if incl and 'closed' in kwargs:\n kwargs['inclusive'] = kwargs.pop('closed')\n elif not incl and 'inclusive' in kwargs:\n kwargs['closed'] = kwargs.pop('inclusive')\n if kwargs['closed'] == 'both':\n kwargs['closed'] = None\n\n return pd.date_range(*args, **kwargs)", "def __init__(self, startDate, endDate, referenceDate, effectiveDate, freq, notional, recovery=0.4):\n self.recovery=recovery\n self.simNum = 10\n self.xR = []\n self.xQ = []\n # use MC to generate scenarios for exposure\n self.mcSim = MC_Vasicek_Sim()\n #self.mcSim.setVasicek(minDay=startDate,maxDay=endDate,x=xR,t_step=1.0/365, simNumber=self.simNum)\n # z is discount curve\n self.zCurve =[]\n self.swapRate = []\n self.freq = freq\n self.notional=notional\n self.fixedLeg = []\n self.floatingLeg = []\n self.startDate = startDate\n self.endDate = endDate\n self.referenceDate = referenceDate\n self.effctiveDate = effectiveDate\n self.initialSpread =[]\n self.myScheduler = Scheduler()\n self.datelist = pd.date_range(start=effectiveDate,end=endDate, freq=freq)", "def func_checker(*args, **kwargs):\n da = args[0]\n if \"start_date\" not in kwargs:\n # use string for first year only - .sel() will include all time steps\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n if \"end_date\" not in kwargs:\n # use string for last year only - .sel() will include all time steps\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n\n if isinstance(kwargs[\"start_date\"], int) or isinstance(kwargs[\"end_date\"], int):\n warnings.warn(\n \"start_date and end_date require dates in (type: str) \"\n 'using formats of \"%Y\", \"%Y-%m\" or \"%Y-%m-%d\".',\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = str(kwargs[\"start_date\"])\n kwargs[\"end_date\"] = str(kwargs[\"end_date\"])\n\n try:\n da.time.sel(time=kwargs[\"start_date\"])\n except KeyError:\n warnings.warn(\n '\"start_date\" not found within input date time range. Defaulting to minimum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n try:\n da.time.sel(time=kwargs[\"end_date\"])\n except KeyError:\n warnings.warn(\n '\"end_date\" not found within input date time range. Defaulting to maximum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n\n if (\n da.time.sel(time=kwargs[\"start_date\"]).min()\n > da.time.sel(time=kwargs[\"end_date\"]).max()\n ):\n raise ValueError(\n f'Start date (\"{kwargs[\"start_date\"]}\") is after end date (\"{kwargs[\"end_date\"]}\").'\n )\n\n return func(*args, **kwargs)", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def _getInsuranceCosts(self, date):\n start_date_insurance = self.last_day_construction\n end_date_insurance = start_date_insurance + relativedelta(years=self.insuranceDurationEquipment)\n if start_date_insurance <= date <= end_date_insurance:\n return self.insuranceFeeEquipment * self.investments / 365 #deviding by 365 days in year\n else:\n return 0", "def GetInstrumentPrice(instrument, date, market):\n key = '%s|%s|%s' % (instrument.Oid(), market.Oid(), str(date))\n if not __instrumentPriceDict.HasKey(key):\n price = acm.FPrice.Select(\"instrument = %i and market = %i and day = %s\" % (instrument.Oid(), market.Oid(), str(date)))\n if price:\n __instrumentPriceDict[key] = price.At(0)\n else:\n __instrumentPriceDict[key] = 0.0\n return __instrumentPriceDict[key]", "def archive_date_range(request, inst):\n\n # Ensure the instrument is correctly capitalized\n inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]\n\n template = 'archive_date_range.html'\n sort_type = request.session.get('image_sort', 'Recent')\n group_type = request.session.get('image_group', 'Exposure')\n context = {'inst': inst,\n 'base_url': get_base_url(),\n 'sort': sort_type,\n 'group': group_type}\n\n return render(request, template, context)", "def process(self, inputs):\n beg_date = datetime.datetime.strptime(self.conf['beg'], '%Y-%m-%d') # noqa: F841\n end_date = datetime.datetime.strptime(self.conf['end'], '%Y-%m-%d') # noqa: F841\n return df.query('datetime<@end_date and datetime>=@beg_date')", "def getIntervalMatchingDate(self, basedate, clock=_clock_daily):\n resolve = lambda x: self.ucs[x] if x in self.ucs else None\n clock = int(clock)\n if clock == resolve(self._clock_daily):\n # daily\n return datetime.date.fromordinal(basedate.toordinal())\n elif clock == resolve(self._clock_weekly):\n # weekly\n if self.map_mode == self._map_to_start:\n return self.getWeekStartDate(basedate)\n else:\n return self.getWeekEndDate(basedate)\n elif clock == resolve(self._clock_monthly):\n # monthly\n if self.map_mode == self._map_to_start:\n return datetime.date(basedate.year, basedate.month, 1)\n else:\n return datetime.date(basedate.year, basedate.month, calendar.monthrange(basedate.year, basedate.month)[1])\n elif clock == resolve(self._clock_quarterly):\n # quarterly\n if basedate.month > 0 and basedate.month < 4:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=3, day=31)\n elif basedate.month >= 4 and basedate.month < 7:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=4, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=6, day=30)\n elif basedate.month >= 7 and basedate.month < 10:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=7, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=9, day=30)\n else:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=10, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)\n elif clock == resolve(self._clock_half_yearly):\n # half yearly\n if basedate.month <= 6:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=6, day=30)\n else:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=7, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)\n elif clock == resolve(self._clock_yearly):\n # yearly\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def apply_filter(self, date_range, min_value=-np.inf, max_value=np.inf, r_type='Ed', accum_filter=False):\n df = self.get_sliced_radiance(date_range, r_type=r_type) if date_range else self[r_type]\n\n subset = df[(df.max(axis=1) > min_value) & (df.max(axis=1) < max_value)]['DateTime'].astype('str').to_list()\n\n if accum_filter and self._subset:\n self.subset = intersection(subset, self._subset)\n else:\n self.subset = subset\n\n # subset = intersection(subset, self.radiances['Rrs']['DateTime'].astype('str').to_list())\n\n # if accum_filter and len(self._subset) > 0:\n\n # self._subset = intersection(subset, self._subset) if not accum_filter else intersection" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
choose the correct instrument to use for observations for a given date range based on the primary instrument for that time period. inputs must be date objects from the datetime module.
def choose_prime_inst(given_start_date,given_end_date): #extracting primary dates where instruments are active from csv file inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3) #figuring out which instrument is primary for given start date for d in range(len(inst_prime_dates['Start Date'])): change_date = parse(inst_prime_dates['Start Date'][d]) if given_start_date >= change_date.date(): prime_inst = inst_prime_dates['EPEAD Primary'][d] backup_inst = inst_prime_dates['EPEAD Secondary'][d] end_date = parse(inst_prime_dates['Start Date'][d+1]).date() #if no prime instrument available, have to choose which instrument #to use based on which instruments have data for this date if str(prime_inst) == 'nan': if str(backup_inst) == 'nan': print('no information about primary instrument available.' 'Choosing instrument based on active date ranges') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) else: prime_inst = backup_inst break prime_inst = str(prime_inst).split('.')[0] #reformatting instrument name if len(prime_inst) == 2: inst_str = str(prime_inst) elif len(prime_inst) == 1: inst_str = '0' + str(prime_inst) print('GOES-%s is the primary instrument for given start time' %inst_str) #checking to make sure this primary instrument actually has data year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) try: request.urlopen(url) print('GOES-%s has data available' %inst_str) instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: #if primary instrument doesn't have data for this date, using backup instrument print('GOES-%s does NOT have data available' %inst_str) #reformatting backup instrument if len(str(backup_inst)) == 2: inst_str = str(backup_inst) elif len(str(backup_inst)) ==1: inst_str = '0' + str(backup_inst) print('checking for data from backup instrument GOES-%s' %inst_str) url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) #checking to see if backup instrument has data for this date, if not have #to manually choose which instrument to use based off which instruments #have data available try: request.urlopen(url) print('backup instrument data found - using backup instrument') instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: print('no knowledge of backup or primary instrument - choosing ' 'instrument based on available data') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) return([instrument,end_date])
[ "def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS\r\n\r\n inst_start_dates=[]\r\n inst_end_dates=[]\r\n good_instruments = []\r\n good_end_dates = []\r\n bad_inst = []\r\n\r\n #extracting dates where instruments are active from csv file\r\n inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv')\r\n\r\n for s in inst_dates['start']:\r\n inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date())\r\n\r\n for e in inst_dates['end']:\r\n if str(e) == 'nan':\r\n inst_end_dates.append(datetime.today().date())\r\n else:\r\n inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date())\r\n\r\n #checking which instruments are active during given time period and\r\n #choosing the correct ones\r\n print('checking which instruments are active for given dates')\r\n\r\n for i in range(len(inst_start_dates)):\r\n if (inst_start_dates[i] < given_start_date) and (given_end_date <\r\n inst_end_dates[i]):\r\n print('%s works' %inst_dates['Instrument'][i])\r\n good_instruments.append(inst_dates['Instrument'][i])\r\n good_end_dates.append(inst_end_dates[i])\r\n else:\r\n print('outside of %s range' %inst_dates['Instrument'][i])\r\n\r\n #checking if active instruments actually have data for that date\r\n for inst in good_instruments:\r\n inst_str = inst.replace('-','').lower()\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('%s data available' %inst)\r\n\r\n except:\r\n print('%s data NOT available' %inst)\r\n bad_inst.append(inst)\r\n\r\n #not choosing instrument if it doesn't have data\r\n for binst in bad_inst:\r\n good_instruments.remove(binst)\r\n\r\n #if more than one instrument is available, choose which one to use\r\n if len(good_instruments) > 1:\r\n print('Please choose which instrument you would like to use.')\r\n\r\n for j in range(len(good_instruments)):\r\n print('Type ' + str(j) + ' for ' + str(good_instruments[j]))\r\n\r\n inst_choice = input('Answer:' )\r\n\r\n instrument = good_instruments[int(inst_choice)]\r\n end_date = good_end_dates[int(inst_choice)]\r\n\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n else:\r\n\r\n instrument = good_instruments[0]\r\n end_date = good_end_dates[0]\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n return([instrument,end_date])", "def sample_deformation_on_acq_dates(acq_dates, tc_def, def_dates):\n \n import numpy as np\n \n n_acq = len(acq_dates) # \n tc_def_resampled = np.zeros((n_acq, 1)) # initialise as empty (zeros)\n \n for acq_n, acq_date in enumerate(acq_dates): \n day_arg = def_dates.index(acq_date) # find which day number the acquiisiont day is\n day_def = tc_def[day_arg] # get the deformaiton for that day\n tc_def_resampled[acq_n, 0] = day_def # record\n \n return tc_def_resampled", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def getIntervalMatchingDate(self, basedate, clock=_clock_daily):\n resolve = lambda x: self.ucs[x] if x in self.ucs else None\n clock = int(clock)\n if clock == resolve(self._clock_daily):\n # daily\n return datetime.date.fromordinal(basedate.toordinal())\n elif clock == resolve(self._clock_weekly):\n # weekly\n if self.map_mode == self._map_to_start:\n return self.getWeekStartDate(basedate)\n else:\n return self.getWeekEndDate(basedate)\n elif clock == resolve(self._clock_monthly):\n # monthly\n if self.map_mode == self._map_to_start:\n return datetime.date(basedate.year, basedate.month, 1)\n else:\n return datetime.date(basedate.year, basedate.month, calendar.monthrange(basedate.year, basedate.month)[1])\n elif clock == resolve(self._clock_quarterly):\n # quarterly\n if basedate.month > 0 and basedate.month < 4:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=3, day=31)\n elif basedate.month >= 4 and basedate.month < 7:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=4, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=6, day=30)\n elif basedate.month >= 7 and basedate.month < 10:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=7, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=9, day=30)\n else:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=10, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)\n elif clock == resolve(self._clock_half_yearly):\n # half yearly\n if basedate.month <= 6:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=6, day=30)\n else:\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=7, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)\n elif clock == resolve(self._clock_yearly):\n # yearly\n if self.map_mode == self._map_to_start:\n return datetime.date(day=1, month=1, year=basedate.year)\n else:\n return datetime.date(year=basedate.year, month=12, day=31)", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def __rowAtDate__(self, analysis, input, test_date):\n # check if date is at a boundary of, or falls outside, the period of testing\n lower = analysis.__initialAnalysisPeriod__()\n upper = len(input[\"Date\"]) - 1\n \n if input[\"Date\"][lower]>=test_date:\n return lower\n \n elif input[\"Date\"][upper]<=test_date:\n return upper\n \n else: # perform binary search through test dates\n while True:\n middle = (lower + upper)/2\n \n if input[\"Date\"][middle-1]<test_date and input[\"Date\"][middle]>=test_date:\n return middle\n \n elif input[\"Date\"][middle]<test_date:\n lower = middle\n \n else: #input[\"Date\"][middle]>test_date:\n upper = middle", "def process(self, inputs):\n beg_date = datetime.datetime.strptime(self.conf['beg'], '%Y-%m-%d') # noqa: F841\n end_date = datetime.datetime.strptime(self.conf['end'], '%Y-%m-%d') # noqa: F841\n return df.query('datetime<@end_date and datetime>=@beg_date')", "def apply_filter(self, date_range, min_value=-np.inf, max_value=np.inf, r_type='Ed', accum_filter=False):\n df = self.get_sliced_radiance(date_range, r_type=r_type) if date_range else self[r_type]\n\n subset = df[(df.max(axis=1) > min_value) & (df.max(axis=1) < max_value)]['DateTime'].astype('str').to_list()\n\n if accum_filter and self._subset:\n self.subset = intersection(subset, self._subset)\n else:\n self.subset = subset\n\n # subset = intersection(subset, self.radiances['Rrs']['DateTime'].astype('str').to_list())\n\n # if accum_filter and len(self._subset) > 0:\n\n # self._subset = intersection(subset, self._subset) if not accum_filter else intersection", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def define_secdate(self):\r\n \r\n # Since 2017\r\n self.start_date = datetime.datetime(2017,1,1) + (datetime.datetime(2017,12,31) - datetime.datetime(2017,1,1))/2 \r\n self.end_date = datetime.datetime(2050,1,1)\r\n self.ktime = (self.end_date - self.start_date).days + 1\r\n self.date = np.zeros(self.ktime,dtype=datetime.datetime)\r\n self.t = np.zeros(self.ktime)\r\n self.dt = 1/365.25\r\n \r\n for k in range(0,self.ktime):\r\n \r\n self.date[k] = self.start_date + datetime.timedelta(days=self.t[k]*365.25)\r\n\r\n if k < self.ktime-1:\r\n \r\n self.t[k+1] = self.t[k] + self.dt\r\n \r\n # Since 1990\r\n self.start_date_hist = datetime.datetime(1990,1,1) + (datetime.datetime(1990,12,31) - datetime.datetime(1990,1,1))/2 \r\n self.ktime_1990_2050 = (self.end_date - self.start_date_hist).days + 1\r\n self.date_1990_2050 = np.zeros(self.ktime_1990_2050,dtype=datetime.datetime)\r\n self.t_1990_2050 = np.zeros(self.ktime_1990_2050)\r\n \r\n for k in range(0,self.ktime_1990_2050):\r\n \r\n self.date_1990_2050[k] = self.start_date_hist + datetime.timedelta(days=self.t_1990_2050[k]*365.25)\r\n \r\n if (self.date_1990_2050[k].year == self.start_date.year and self.date_1990_2050[k].month == self.start_date.month and self.date_1990_2050[k].day == self.start_date.day):\r\n \r\n self.ktime_proj_crossing = k\r\n \r\n \r\n if k < self.ktime-1:\r\n \r\n self.t_1990_2050[k+1] = self.t_1990_2050[k] + self.dt \r\n \r\n return", "def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)", "def test_date_range(self):\n chip = search.DateRangeChip()\n with self.assertRaises(ValueError):\n chip.start_time = \"20 minutes\"\n\n chip = search.DateRangeChip()\n date_string = \"2020-12-12T12:12:12,2020-12-12T12:12:12\"\n chip.from_dict({\"value\": date_string})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip.chip, expected_chip)\n self.assertEqual(chip.start_time, \"2020-12-12T12:12:12\")\n self.assertEqual(chip.end_time, \"2020-12-12T12:12:12\")\n\n chip_micro = search.DateRangeChip()\n date_string_micro = \"2020-12-12T12:12:12.000Z,2020-12-12T12:12:12.000Z\"\n chip_micro.from_dict({\"value\": date_string_micro})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip_micro.chip, expected_chip)\n self.assertEqual(chip_micro.start_time, \"2020-12-12T12:12:12\")\n self.assertEqual(chip_micro.end_time, \"2020-12-12T12:12:12\")\n\n chip_micro = search.DateRangeChip()\n date_string = \"2020-12-12T12:12:12.001,2020-12-12T12:12:12.002\"\n chip_micro.from_dict({\"value\": date_string})\n\n expected_chip = {\n \"active\": True,\n \"field\": \"\",\n \"type\": \"datetime_range\",\n \"operator\": \"must\",\n \"value\": date_string,\n }\n\n self.assertEqual(chip_micro.chip, expected_chip)\n self.assertEqual(chip_micro.start_time, \"2020-12-12T12:12:12.001\")\n self.assertEqual(chip_micro.end_time, \"2020-12-12T12:12:12.002\")", "def func_checker(*args, **kwargs):\n da = args[0]\n if \"start_date\" not in kwargs or kwargs[\"start_date\"] is None:\n # use string for first year only - .sel() will include all time steps\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n if \"end_date\" not in kwargs or kwargs[\"end_date\"] is None:\n # use string for last year only - .sel() will include all time steps\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n\n if isinstance(kwargs[\"start_date\"], int) or isinstance(kwargs[\"end_date\"], int):\n warnings.warn(\n \"start_date and end_date require dates in (type: str) \"\n 'using formats of \"%Y\", \"%Y-%m\" or \"%Y-%m-%d\".',\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = str(kwargs[\"start_date\"])\n kwargs[\"end_date\"] = str(kwargs[\"end_date\"])\n\n try:\n sel_time = da.time.sel(time=kwargs[\"start_date\"])\n if sel_time.size == 0:\n raise ValueError()\n except KeyError:\n warnings.warn(\n '\"start_date\" not found within input date time range. Defaulting to minimum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"start_date\"] = da.time.min().dt.strftime(\"%Y\").values\n except ValueError:\n warnings.warn(\n '\"start_date\" has been nudged to nearest valid time step in xarray object.',\n UserWarning,\n stacklevel=2,\n )\n nudged = da.time.sel(time=slice(kwargs[\"start_date\"], None)).values[0]\n kwargs[\"start_date\"] = to_isoformat(nudged)\n\n try:\n sel_time = da.time.sel(time=kwargs[\"end_date\"])\n if sel_time.size == 0:\n raise ValueError()\n except KeyError:\n warnings.warn(\n '\"end_date\" not found within input date time range. Defaulting to maximum time step in '\n \"xarray object.\",\n UserWarning,\n stacklevel=2,\n )\n kwargs[\"end_date\"] = da.time.max().dt.strftime(\"%Y\").values\n except ValueError:\n warnings.warn(\n '\"end_date\" has been nudged to nearest valid time step in xarray object.',\n UserWarning,\n stacklevel=2,\n )\n nudged = da.time.sel(time=slice(None, kwargs[\"end_date\"])).values[-1]\n kwargs[\"end_date\"] = to_isoformat(nudged)\n\n if (\n da.time.sel(time=kwargs[\"start_date\"]).min()\n > da.time.sel(time=kwargs[\"end_date\"]).max()\n ):\n raise ValueError(\n f'Start date (\"{kwargs[\"start_date\"]}\") is after end date (\"{kwargs[\"end_date\"]}\").'\n )\n\n return func(*args, **kwargs)", "def resampleDataSet(dailyData, resampleString, resampleMethod, customFunction = None):\n\n # Make sure the index is sorted\n dailyData.sort_index(level='Datetime', inplace=True)\n\n # Get today's date\n today = datetime.now()\n\n # Create a new empty series\n resampleData = pd.Series([], index = pd.DatetimeIndex([]))\n\n # Get information about the daily data\n firstDate = dailyData.index[0][0]\n\n # Parse the resample string\n resampleList = resampleString.split('/') # Converts 'R/1978-10-01/P1M/F1Y' into ['R', '1978-10-01', 'P1M', 'F1Y', 'S1Y']\n\n # Validate the list\n if resampleList[0] != 'R' or len(resampleList[1]) != 10 or resampleList[2][0] != 'P' or resampleList[3][0] != 'F': #or len(resampleList) != 4\n return resampleData, 1, 'Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y'\n \n # Validate the resample method\n if resampleMethod not in ['accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median']:\n return resampleData, 1, \"Invalid resampling method. Provide one of 'accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median'\"\n\n # Parse into values\n startDate = datetime.strptime(resampleList[1], '%Y-%m-%d') # >>> datetime.date(1978, 10, 1)\n period = isodate.parse_duration(resampleList[2]) # >>> isodate.duration.Duration(0, 0, 0, years=0, months=1)\n # Change the period to 1 day if the resample method is 'first'\n if resampleMethod == 'first':\n period = isodate.parse_duration(\"P1D\")\n frequency = isodate.parse_duration(resampleList[3].replace('F', 'P')) # >>> isodate.duration.Duration(0, 0, 0, years=1, months=1)\n\n # Create all the periods\n periods = []\n tracker = startDate\n while tracker <= today: # >>> periods = [(datetime.datetime(1978-10-01), datetime.datetime(1978-11-01))]\n periods.append((tracker, tracker+period))\n tracker += frequency\n\n # Parse the function\n func = lambda x: np.nan if x.isnull().all() else (np.nanmean(x) if resampleMethod == 'average' else (\n np.nansum(x) if resampleMethod == 'accumulation' else (\n 86400*(1/43560000)*np.nansum(x) if resampleMethod == 'accumulation_cfs_kaf' else (\n x.iloc[0] if resampleMethod == 'first' else (\n x.iloc[-1] if resampleMethod == 'last' else (\n np.nanmedian(x) if resampleMethod == 'median' else (\n np.nanmax(x) if resampleMethod == 'max' else (\n np.nanmin(x) if resampleMethod == 'min' else eval(customFunction)))))))))\n\n # Resample the data\n for idx in pd.IntervalIndex.from_tuples(periods):\n data = dailyData.loc[idx.left : idx.right]\n if resampleMethod != 'first' and resampleMethod != 'last':\n data.isMostlyThere = len(data) > int(0.95*(idx.right-idx.left).days) # Check to make sure 95% of data is there!\n else:\n data.isMostlyThere = True\n resampleData.loc[idx.left] = ( func(data) if (idx.right >= firstDate and today >= idx.right and (data.isMostlyThere)) else np.nan )\n\n if len(resampleList) == 5:\n shiftStrings = list(resampleList[4])\n if shiftStrings[1].isdigit():\n resampleData.index = resampleData.index + pd.offsets.DateOffset(years=int(shiftStrings[1]))\n else:\n return resampleData, 1, \"Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y\"\n\n\n # Name the dataframe\n resampleData.name = dailyData.name + '_' + resampleList[1] + '_' + resampleList[2] + '_' + resampleList[3] + '_' + resampleMethod + '_' + str(customFunction)\n\n return resampleData", "def _date_range_to_index(start_date, end_date):\n start_index = pd.to_datetime(start_date).day\n end_index = pd.to_datetime(end_date).day + 1\n return np.arange(start_index, end_index)", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def pd_date_range(*args, **kwargs):\n incl = version.parse(pd.__version__) >= version.parse('1.4.0')\n\n if incl and 'closed' in kwargs:\n kwargs['inclusive'] = kwargs.pop('closed')\n elif not incl and 'inclusive' in kwargs:\n kwargs['closed'] = kwargs.pop('inclusive')\n if kwargs['closed'] == 'both':\n kwargs['closed'] = None\n\n return pd.date_range(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
will create JSON output files if there are two events (for each threshold) in one time window. Ie, if there are two >10MeV >10pfu events as well as two >100MeV >1pfu events, will create files for all four events, but if there are three >100MeV >1pfu events, will only generate JSON files for the first two. Second events have different thresholds in different files as opposed to together.
def two_in_one(obs_file,et,subevent): #in this function, the "original time window" talked about in the comments #refers to the start and end times that were input to create the file obs_file, #which will likely have been created using the database_extraction function #opening first output file created by operational_sep_quantities with open(obs_file, 'r') as o: out = js.load(o) #all events recorded in that output file ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events']) #creating lists for values from each event end_times = [] start_times = [] energy_thresholds = [] flux_thresholds = [] out_names = [] #appending values to lists for each event for i in range(len(ongoing_events)): start_times.append(parse(ongoing_events[i]['start_time'])) end_times.append(parse(ongoing_events[i]['end_time'])) energy_thresholds.append(ongoing_events[i]['energy_min']) flux_thresholds.append(float(ongoing_events[i]['threshold'])) #checking if there was a second event for each threshold for i in range(len(end_times)): end = end_times[i] #if the end time of an event for any threshold was a day before the last day #in the original time window given, will check if ONLY THAT THRESHOLD #had another event after the first one, using the end time of the first #event of that threshold as the new start time of the event window if end.date() < et.date(): print('end time to use as new start time: %s' %end) #figuring out which threshold this end time was for flux_thresh = int(flux_thresholds[i]) energy_thresh = int(energy_thresholds[i]) print('extracting second event for threshold ' + str(flux_thresh) + ' MeV ' + str(energy_thresh) + ' pfu') #new start time (2 days in advance bc the database_extraction function #makes the start time 2 days prior, so will cancel that out) st = end + timedelta(days=2) #thresholds in correct format thresholds = str(energy_thresh) + ',' + str(flux_thresh) print('thresholds: %s' %thresholds) #creating observation data for second event for thresholds given out_names.append(Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent, thresholds = thresholds, one_thresh = True)) #returns list of all new files created by this function return(out_names)
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def generate_signal_json():\n paramsubdirs = eosls(EOSPATH_SIG)\n json_4mu, json_2mu2e = {}, {}\n for subdir in paramsubdirs:\n if '4Mu' in subdir:\n key = subdir.replace('SIDM_BsTo2DpTo4Mu_', '').split('_ctau')[0].replace('MBs', 'mXX').replace('MDp', 'mA')\n key += '_lxy-300' # mXX-1000_mA-0p25_lxy-300\n timestampdirs = eosls(join(EOSPATH_SIG, subdir))\n timestampdirs = sorted(timestampdirs, key=lambda x: datetime.strptime(x, \"%y%m%d_%H%M%S\"))\n latest = join(EOSPATH_SIG, subdir, timestampdirs[-1])\n json_4mu[key] = [f for f in eosfindfile(latest) if '/failed/' not in f]\n if '2Mu2e' in subdir:\n key = subdir.replace('SIDM_BsTo2DpTo2Mu2e_', '').split('_ctau')[0].replace('MBs', 'mXX').replace('MDp', 'mA')\n key += '_lxy-300'\n timestampdirs = eosls(join(EOSPATH_SIG, subdir))\n timestampdirs = sorted(timestampdirs, key=lambda x: datetime.strptime(x, \"%y%m%d_%H%M%S\"))\n latest = join(EOSPATH_SIG, subdir, timestampdirs[-1])\n json_2mu2e[key] = [f for f in eosfindfile(latest) if '/failed/' not in f]\n\n ## samples with new naming\n for subdir in eosls(EOSPATH_SIG2['4mu']):\n key = subdir.split('_ctau')[0] # mXX-100_mA-5_lxy-0p3\n timestampdirs = eosls(join(EOSPATH_SIG2['4mu'], subdir))\n timestampdirs = sorted(timestampdirs, key=lambda x: datetime.strptime(x, \"%y%m%d_%H%M%S\"))\n latest = join(EOSPATH_SIG2['4mu'], subdir, timestampdirs[-1])\n json_4mu[key] = [f for f in eosfindfile(latest) if '/failed/' not in f]\n for subdir in eosls(EOSPATH_SIG2['2mu2e']):\n key = subdir.split('_ctau')[0] # mXX-100_mA-5_lxy-0p3\n timestampdirs = eosls(join(EOSPATH_SIG2['2mu2e'], subdir))\n timestampdirs = sorted(timestampdirs, key=lambda x: datetime.strptime(x, \"%y%m%d_%H%M%S\"))\n latest = join(EOSPATH_SIG2['2mu2e'], subdir, timestampdirs[-1])\n json_2mu2e[key] = [f for f in eosfindfile(latest) if '/failed/' not in f]\n\n with open('signal_4mu.json', 'w') as outf:\n outf.write(json.dumps(json_4mu, indent=4))\n with open('signal_2mu2e.json', 'w') as outf:\n outf.write(json.dumps(json_2mu2e, indent=4))", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def get_metrics_files(project, MIN_DIFFERENCE):\n print(\"LOG: Starting with\", project)\n\n # Get the latest two metrics for this project which are MIN_DIFFERENCE days apart\n re_metrics = re.compile(r\"METRICS-\\d{4}-\\d{2}-\\d{2}.json\")\n all_metrics = []\n\n for filename in os.listdir(project):\n if re_metrics.match(filename):\n all_metrics.append(filename)\n\n all_metrics.sort()\n\n # Come back later when there are atleast two generated metrics files\n if len(all_metrics) < 2:\n return False, {}, {}\n\n current_metrics_json_file = all_metrics.pop()\n print(\"LOG: Current metrics json file\", current_metrics_json_file)\n\n # If the latest Metrics is older than MIN_DIFFERENCE, then don't generate report\n # This is possible in cases of repo turning private or moving out\n today_datestamp = datetime.datetime.now()\n latest_datestamp = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n datetime_delta = today_datestamp - latest_datestamp\n if datetime_delta.days > MIN_DIFFERENCE:\n print(\"Skipping report for\", project, \"Latest metrics file is older than MIN_DIFFERENCE\")\n return False, {}, {}\n\n previous_metrics_json_file = None\n previous_metrics_index_index = len(all_metrics) - 1\n while(previous_metrics_index_index >= 0):\n # Calculate difference between last two metrics\n d1 = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n d2 = datetime.datetime.strptime(all_metrics[previous_metrics_index_index], \"METRICS-%Y-%m-%d.json\")\n if (d1 - d2).days > MIN_DIFFERENCE:\n previous_metrics_json_file = all_metrics[previous_metrics_index_index]\n print(\"LOG: Previous metrics json\", previous_metrics_json_file)\n break\n else:\n previous_metrics_index_index -= 1\n\n # Metrics are not older than MIN_DIFFERENCE days\n if previous_metrics_json_file is None:\n return False, {}, {}\n\n return True, current_metrics_json_file, previous_metrics_json_file", "def events_filename(level: VerbosityType, with_bz2: bool = True) -> str:\n suffix = \"\" if level == 0 else f\"_{level}\"\n file_extension_suffix = \".bz2\" if with_bz2 else \"\"\n return f\"events{suffix}.json{file_extension_suffix}\"", "def test_createJSON(testData, mktestfile):\n from outputter import createJSON\n from outputter import createDictionary\n trueDict = createDictionary(testData)\n JSONfile = createJSON(trueDict, mktestfile)\n with open(JSONfile, 'r') as readJSON:\n insideDict = json.load(readJSON)\n check = True\n if insideDict.get(\"mean_hr_bpm\") != 12:\n check = False\n if insideDict.get(\"voltage_extremes\") != [0.0, 100.0]:\n check = False\n logging.warning('{}'.format(insideDict.get(\"voltage_extremes\")))\n logging.warning('{}'.format(trueDict.get(\"voltage_extremes\")))\n if insideDict.get(\"duration\") != 10:\n check = False\n if insideDict.get(\"num_beats\") != 2:\n check = False\n if insideDict.get(\"beats\") != [3.5, 7.5]:\n check = False\n assert check is True", "def ExportJSON():\r\n print(\"\\n\\n======== json plugin\\n\")\r\n\r\n # log file location\r\n log_path = \"/\"\r\n log_filename = \"loki_scan_log.csv\"\r\n\r\n # ensure expected output folders exist\r\n results_path = \"/loki/src/results/\"\r\n if not os.path.isdir(results_path):\r\n os.mkdir(results_path)\r\n\r\n # csv line format: TIMESTAMP, HOSTNAME, MESSAGE_TYPE, MODULE, MESSAGE\r\n TIMESTAMP = 0\r\n MESSAGE_TYPE = 2 # = {INFO, WARNING, ALERT}\r\n MODULE = 3 # = {FileScan, ProcessScan}\r\n MESSAGE = 4 # = Scanning $path TYPE: %s SIZE: %d\r\n\r\n json_output = {\r\n \"info\": {},\r\n \"alerts\": {},\r\n \"warnings\": {}\r\n }\r\n\r\n with open(log_path + log_filename, 'r') as fp:\r\n csv_reader = csv.reader(fp, delimiter=',')\r\n timestamp = 0\r\n for row in csv_reader:\r\n\r\n # Get the scan's timestamp (used for start/end time)\r\n timestamp = row[TIMESTAMP]\r\n\r\n if row[MODULE] == \"FileScan\":\r\n\r\n if row[MESSAGE_TYPE] == 'INFO':\r\n details = parse('Scanning {location} TYPE: {filetype} SIZE: {size}', row[MESSAGE])\r\n \r\n # skip messages that don't have the right format\r\n if details == None:\r\n continue\r\n\r\n # If meta info hasn't been initialised yet (first scan)\r\n if \"meta\" not in json_output:\r\n json_output[\"meta\"] = {}\r\n json_output[\"meta\"][\"total_files_scanned\"] = 0\r\n json_output[\"meta\"][\"total_bytes\"] = 0\r\n json_output[\"meta\"][\"total_warnings\"] = 0\r\n json_output[\"meta\"][\"total_alerts\"] = 0\r\n json_output[\"meta\"][\"start_time\"] = timestamp\r\n json_output[\"meta\"][\"scan_path\"] = details['location']\r\n \r\n # First time seeing this type\r\n if details['filetype'] not in json_output['info']:\r\n json_output['info'][details['filetype']] = {\r\n \"total_type_scanned\" : 0,\r\n \"cumulative_bytes\": 0\r\n }\r\n\r\n # Increment values\r\n json_output['info'][details['filetype']][\"total_type_scanned\"] += 1\r\n json_output['info'][details['filetype']][\"cumulative_bytes\"] += int(details['size'])\r\n json_output['meta'][\"total_files_scanned\"] += 1\r\n json_output['meta'][\"total_bytes\"] += int(details['size'])\r\n\r\n elif row[MESSAGE_TYPE] in ['WARNING', 'ALERT']:\r\n scan_type = row[MESSAGE_TYPE].lower() + \"s\"\r\n details = parse('FILE: {file} SCORE: {score} TYPE: {filetype} SIZE: {size} FIRST_BYTES: {first_bytes} MD5: {md5} SHA1: {sha1} SHA256: {sha256} CREATED: {created} MODIFIED: {modified} ACCESSED: {accessed} REASON_1: {remainder}', row[MESSAGE])\r\n\r\n # skip messages that don't have the right format\r\n if details == None:\r\n print(\"skipping {}\".format(row[MESSAGE]))\r\n continue\r\n\r\n json_output[scan_type][details['file']] = {\r\n 'warning_meta': {\r\n \"created\": details['created'],\r\n \"modified\": details['modified'],\r\n \"accessed\": details['accessed'],\r\n \"filetype\": details['filetype'],\r\n \"size\": details['size']\r\n },\r\n \"score\": details['score'],\r\n \"first_bytes\": details['first_bytes'],\r\n 'hashes': {\r\n \"md5\": details['md5'],\r\n \"sha1\": details['sha1'],\r\n \"sha256\": details['sha256']\r\n },\r\n 'reasons': []\r\n }\r\n\r\n # This is a bit hacky, but I add REASON_1 back in after it was stripped by the above parse method\r\n # then prepend REASON back into the string after the regex split as it allows for the following \r\n # string-split and search-loop to be cleaner.\r\n # \r\n # The string-split regex matches all REASONS within the string and splits them.\r\n # The 0th index is removed because remainder is guaranteed to start with \"REASON_1\" and\r\n # re.split() will include an string for anything occuring before the first match (in this case, nothing)\r\n remainder = \"REASON_1: \" + details['remainder']\r\n reasons = re.split('REASON_\\d+:', remainder)\r\n reasons.pop(0) \r\n for i in range(0, len(reasons)):\r\n reasons[i] = 'REASON:' + reasons[i]\r\n\r\n # Each entry in reasons[] is a string beginning with \"REASON:\", followed by <k,v> pairs\r\n # in the format <\"$KEY:\", value>. For each of a REASON's keys, the loop creates\r\n # and populates a dictionary for it in the following format.\r\n #\r\n # \"reasons\": [\r\n # {\r\n # \"reason\": \"Malware Hash\",\r\n # \"type\": \"MD5\",\r\n # \"hash\": \"8979594423b68489024447474d113894\",\r\n # \"subscore\": \"100\"\r\n # }]\r\n #\r\n # note: the following is required as the format of Loki logs past 'REASON' is often missing spacing\r\n # between reasons and the order/existence of the keys is not guaranteed.\r\n current_reason = {}\r\n potential_keys = [\"REASON\", \"MATCH\", \"TYPE\", \"DESCRIPTION\", \"HASH\", \"SUBSCORE\", \"DESC\", \"REF\", \"MATCHES\"]\r\n for reason in reasons:\r\n for k in potential_keys:\r\n search_regex = '(?<=' + k + ': )(.*?)(?= [A-Z]+_?\\d*:)'\r\n reason_entry = re.search(search_regex, reason)\r\n # if we matched a key value (e.g. SUBSCORE: 42)\r\n if reason_entry:\r\n # REASONS & their key values are what we are trying to insert as an object\r\n if k == \"REASON\":\r\n if 'reason' not in current_reason: # first match on reason\r\n current_reason['reason'] = reason_entry.group()\r\n else: #another reason exists, add the previous one and create this one\r\n json_output[scan_type][details['file']]['reasons'].append(current_reason)\r\n current_reason = {\r\n 'reason': reason_entry.group()\r\n }\r\n else:\r\n current_reason[k.lower()] = reason_entry.group()\r\n\r\n # add the currently parsed reason\r\n json_output[scan_type][details['file']]['reasons'].append(current_reason)\r\n\r\n # increment meta counter\r\n json_output['meta'][\"total_\" + scan_type] += 1 \r\n\r\n # add in last-seen timestamp\r\n json_output[\"meta\"][\"end_time\"] = timestamp\r\n\r\n print(\"Processed {} files, {} warnings and {} alerts\".format(\r\n json_output[\"meta\"][\"total_files_scanned\"],\r\n json_output[\"meta\"][\"total_warnings\"],\r\n json_output[\"meta\"][\"total_alerts\"],\r\n ))\r\n\r\n # json export\r\n results_filename = \"loki_results.json\"\r\n\r\n # results_filename = \"loki_{}_{}.json\".format(json_output[\"meta\"][\"scan_path\"], \r\n # time.strftime('%H-%M-%S', time.localtime()))\r\n\r\n with open(results_path + results_filename, 'w') as fp:\r\n json.dump(json_output, fp)\r\n print(\"\\n======== json plugin complete\\n\")", "def create_otb_results(ann, trk_results, trk_names):\n t = np.linspace(0.0, 1.0, 21)\n\n for trk_r, trk_name in zip(trk_results, trk_names):\n json_result_dir = 'results/OPE/' + trk_name + '/scores_uav123'\n if not os.path.exists(json_result_dir):\n os.makedirs(json_result_dir)\n\n json_file = os.path.join(json_result_dir, \"ALL.json\")\n if os.path.exists(json_file):\n continue\n\n print(\"Processing:\", trk_name)\n\n success_rate_list = []\n for i, threshold in enumerate(t):\n if trk_name.find('SORT') != -1 or trk_name.find('YOLO') != -1 or trk_name.find('IoU') != -1:\n isMOTformat = True\n else:\n isMOTformat = False\n success_rate_list.append(overlap_precision(ann, trk_r, threshold, isMOTformat))\n mkjson(trk_name, success_rate_list)", "def generate_no_time_two_files():\n fname = {'stress': 'resources/simple_stress_no_time.json',\n 'strain': 'resources/simple_strain_no_time.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100))\n )]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1))\n )])\n ]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }", "def slice_export_jsons(collected_jsons: dict, output_path: str) -> dict:\n\n single_jsons = {}\n for jsons_list in collected_jsons.values():\n for json_file in jsons_list:\n company_id = json_file[\"nip\"]\n with open(output_path + company_id + '_VATCHECK_' + str(date.today()) + '.json', 'w') as file:\n json.dump(json_file, file)\n single_jsons[company_id] = json_file\n print(\"Collected data successfully sliced and exported into separated files (%s files).\" % len(single_jsons))\n return single_jsons", "def write_event(event):\n file_name = \"/tmp/api%s.log\" % event['ChosenPartition']\n with open(file_name, 'w') as f:\n f.write(json.dumps(event))", "def clean_and_jsonify(fnames, flag,precision=5):\n for f in fnames:\n soup = get_soup(fname=f)\n data = load_lists(soup, flag)\n clean_data(data,flag,precision)\n if 'max' in f or 'min' in f:\n recalculate_annual_data(data,True,precision)\n else:\n recalculate_annual_data(data,False,precision)\n new = open(f[0:-5]+\".json\",'w')\n json.dump(data,new)\n new.close()", "def historize_log_values(write_file_to: str, values: str):\n if path.isfile(write_file_to ):\n historic = read_json_file(write_file_to)\n historic += values\n unique = []\n [unique.append(elem) for elem in historic if elem not in unique]\n unique.sort(key=lambda event: datetime.strptime(event[\"eventTime\"], \"%Y-%m-%d %H:%M:%S\"))\n write_to_json_file(write_file_to, unique)\n else:\n write_to_json_file(write_file_to, values)", "def makeXSecWeights(inDir, jsonfiles, options):\n xsecweights = {}\n tot_ngen = {}\n missing_files = []\n for jfname in jsonfiles:\n ## Rootfiles are in subdirs for the syst and mass_scan samples:\n dirname = inDir\n if 'syst' in jfname: dirname = os.path.join(inDir,'syst')\n if 'mass_scan' in jfname: dirname = os.path.join(inDir,'mass_scan')\n if 'qcd' in jfname: dirname = os.path.join(inDir,'qcd_control')\n if 'z_s' in jfname: dirname = os.path.join(inDir,'z_control')\n if 'photon_samples' in jfname: dirname = os.path.join(inDir,'photon_control')\n jsonFile = open(jfname,'r')\n procList = json.load(jsonFile,encoding = 'utf-8').items()\n\n # Make a survey of *all* existing plots\n for proc_tag in procList:\n for desc in proc_tag[1]:\n data = desc['data']\n isData = desc.get('isdata',False)\n mctruthmode = desc.get('mctruthmode')\n for process in data:\n dtag = process.get('dtag','')\n procKey=dtag\n if mctruthmode : procKey += '_filt'+str(mctruthmode)\n print \"... processing %s\"%procKey\n\n if isData:\n xsecweights[procKey] = 1.0\n tot_ngen[procKey] = 0\n continue\n\n split = process.get('split',1)\n\n try:\n ngen = tot_ngen[procKey]\n except KeyError:\n ngen = 0\n\n for segment in range(0,split):\n eventsFile = dtag\n if split > 1:\n eventsFile = dtag + '_' + str(segment)\n if mctruthmode:\n eventsFile += '_filt%d' % mctruthmode\n rootFileUrl = dirname+'/'+eventsFile+'.root'\n rootFile = openTFile(rootFileUrl)\n if rootFile is None:\n missing_files.append(eventsFile+'.root')\n continue\n\n ngen_seg,_ = getNormalization(rootFile)\n if not isData: ngen += ngen_seg\n\n rootFile.Close()\n\n tot_ngen[procKey] = ngen\n\n # Calculate weights:\n for process in data:\n dtag = process.get('dtag','')\n procKey=dtag\n if mctruthmode : procKey += '_filt'+str(mctruthmode)\n brratio = process.get('br',[1])\n xsec = process.get('xsec',1)\n if procKey not in xsecweights.keys():\n try:\n ngen = tot_ngen[procKey]\n finalBR=1\n for br in brratio: finalBR *=br\n xsecweights[procKey] = finalBR*xsec/ngen\n except ZeroDivisionError:\n if isData:\n xsecweights[procKey] = 1.0\n else:\n print \">>> ERROR: ngen not set for\", procKey\n\n\n if len(missing_files) and options.verbose>0:\n print 20*'-'\n print \"WARNING: Missing the following files:\"\n for filename in missing_files:\n print filename\n print 20*'-'\n\n cachefile = open(\".xsecweights.pck\", 'w')\n pickle.dump(xsecweights, cachefile, pickle.HIGHEST_PROTOCOL)\n cachefile.close()\n print '>>> Produced xsec weights and wrote to cache (.xsecweights.pck)'\n return 0", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def dump_to_json(fileinfos, out):\n jsonarray = json.dumps(fileinfos)\n json_filename = \"all_elements_used.json\"\n text_file = open(os.path.join(out,out_dir_name,json_filename), \"w\")\n text_file.write(jsonarray)\n text_file.close()\n stdout.write(\"... \"+json_filename+\" created\\n\")", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def process_events_json(events):\n trace=process_events(events)\n return json.dumps(trace, separators=(',',':'), indent=2)", "def generate_expected_two_files():\n fname = {'stress': 'resources/simple_stress.json',\n 'strain': 'resources/simple_strain.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))])\n ]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
all events in one time window (not just two) used if there is more than one event occurring within a short time period. will generate an output file for every event that occurs within a given time window not to be confused with many_events, which generates output given multiple time windows. Can create files for up to 3 events within specified time window.
def multi_event(st,et,instrument_chosen,subevent): print('checking for multiple events within given time window') #creating file for time window with first events for all thresholds out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent) #creating files for all second events for all thresholds new_files = two_in_one(out_name,et,subevent) #creating files for any third events for all thresholds that had a second event for file in new_files: two_in_one(file,et,subevent) return
[ "def write_events(events: list, calendar):\n for event in events:\n location = event[\"location\"]\n vevent_string = str()\n vevent_string += 'BEGIN:VEVENT\\n'\n vevent_string += 'DTSTAMP:20210904T194914Z\\n' # here we have to add a custom date\n vevent_string += f'DTSTART;TZID=Europe/Paris:{event[\"date\"]}T{event[\"start_time\"]}\\n'\n vevent_string += f'DTEND;TZID=Europe/Paris:{event[\"date\"]}T{event[\"end_time\"]}\\n'\n vevent_string += f'SUMMARY:{event[\"title\"]}\\n'\n if location is not None:\n vevent_string += f'LOCATION:{location}\\n'\n vevent_string += 'END:VEVENT\\n'\n calendar.write(vevent_string)\n logging.info(f'event {event[\"title\"]} successfully writen')", "def INPUT_Periods_file(input):\n \n global events\n \n tb = 3600\n ta = 3600\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n len_events = len(events)\n \n input_period = open(os.path.join(os.getcwd(), 'INPUT-Periods'), 'a+')\n\n for i in range(0, len_events):\n \n str_event = str(events[i]['datetime']-tb) + '_' + \\\n str(events[i]['datetime']+ta) + '_' + \\\n str(events[i]['magnitude'] - 0.01) + '_' + \\\n str(events[i]['magnitude'] + 0.01) + '\\n'\n input_period.writelines(str_event)\n \n input_period.close()\n \n print '************************************************************' \n print 'New INPUT-Periods file is generated in your folder.'\n print 'Now, you could run the program again based on your desired event :)' \n print '************************************************************'\n \n sys.exit()", "def _make_output(self, events):\n\n # group by event date\n f = lambda e: e.start_time.to_date()\n evs = ['\\n'.join(e.to_format(self.format) for e in g) for k, g in itertools.groupby(events, f)]\n sep = (omap(lambda s: '\\n' + s, self.separator) or '') + '\\n'\n\n # make the result string\n return sep.join(evs)", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def write_busiest_times(top_n):\n file_name = 'hours.txt'\n output_location = settings.log_output_file(file_name)\n try:\n os.remove(output_location)\n except OSError:\n pass\n with open(output_location, 'a') as file:\n while top_n:\n date_str = timestamp_to_date_str(max_key(top_n))\n file.write(date_str + ',' + str(max_value(top_n)) + '\\n')\n top_n.pop(max_key(top_n), None)", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def write_events_to_file(events, events_file):\n parent_dir = os.path.split(events_file)[0]\n if not os.path.exists(parent_dir):\n os.system('mkdir -p ' + parent_dir)\n pickle.dump(events, open(events_file, 'wb'))", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n create_window_event(files=csv_files,\r\n input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def write_log_events(self, log_events):\n # Create log file name.\n # Replace / with - so LogGroup names can be written to current directory.\n file_name = self.log_group.name.replace('/', '-') + \"-\" + self.name + '-0.log'\n\n # Append LogEvents to log file.\n with open(file_name, 'a') as log_file:\n for event in log_events:\n log_file.write(event.message + '\\n')\n print('Wrote ' + str(len(log_events)) + ' LogEvents to ' + file_name)\n\n # Rotate log file if it's bigger than limit\n log_file_size = os.path.getsize(file_name)\n\n if log_file_size > self.log_file_limit:\n rotated_file_name = file_name.split('.')[0] + '-' + str(int(time.time())) + \".log\"\n print('Rotating ' + file_name + ' to ' + rotated_file_name)\n os.rename(file_name, rotated_file_name)", "def buildEvents(self):\r\n # Make sure the destination folder exists\r\n if not(os.path.exists(os.path.dirname(self.file_name))):\r\n print(self.file_name)\r\n os.makedirs(os.path.dirname(self.file_name))\r\n\r\n # Create an empty list and data frame for the\r\n tile_list = []\r\n columns = [\"id\", \"date\", \"x\", \"y\", \"edge\", \"tile\"]\r\n df = pd.DataFrame(columns=columns)\r\n base = dt.datetime(1970, 1, 1)\r\n\r\n # Loop through each netcdf file and build individual tile events\r\n for file in self.files:\r\n tile_id = file[-9:-3]\r\n if os.path.exists(\r\n os.path.join(\r\n self.proj_dir, \"tables/events/\" + tile_id + \".csv\")\r\n ):\r\n print(tile_id + \" event table exists, skipping...\")\r\n elif not os.path.exists(\r\n os.path.join(self.proj_dir, \"rasters/burn_area/netcdfs/\" + tile_id + \".nc\")):\r\n pass\r\n else:\r\n print(\"\\n\" + tile_id)\r\n\r\n # Create a new event object\r\n builder = EventGrid(nc_path=file,\r\n proj_dir=self.proj_dir,\r\n spatial_param=self.spatial_param,\r\n temporal_param=self.temporal_param)\r\n\r\n # Classify event perimeters\r\n perims = builder.get_event_perimeters()\r\n\r\n # Remove empty perimeters\r\n perims = [p for p in perims if type(p.coords[0]) is not str]\r\n tile_list.append(perims)\r\n\r\n # Extract just the event ID, days, and x,y MODIS coordinates\r\n plist = [(p.get_event_id(), p.coords) for p in perims]\r\n\r\n # Identify edge cases, so either x or y is within 5 cells\r\n # make in if statement to not do this if there's only one tile\r\n maxys = builder.data_set[\"y\"].data[:builder.spatial_param]\r\n minys = builder.data_set[\"y\"].data[-builder.spatial_param:]\r\n maxxs = builder.data_set[\"x\"].data[-builder.spatial_param:]\r\n minxs = builder.data_set[\"x\"].data[:builder.spatial_param]\r\n yedges = list(maxys) + list(minys)\r\n xedges = list(maxxs) + list(minxs)\r\n\r\n # Create an empty data frame\r\n print(\"Building data frame...\")\r\n events = []\r\n coords = []\r\n edges = []\r\n ys = []\r\n xs = []\r\n dates = []\r\n for p in plist:\r\n coord = [list(c) for c in p[1]]\r\n # if(len(tile_list>1)): edge = [edgeCheck(yedges, xedges, c, self.sp_buf) for c in coord]\r\n # else: edge = 0\r\n edge = [edgeCheck(yedges, xedges, c, self.sp_buf) for c in coord]\r\n if any(edge):\r\n edge = [True for e in edge]\r\n event = list(np.repeat(p[0], len(coord)))\r\n y = [c[0] for c in coord]\r\n x = [c[1] for c in coord]\r\n date = [base + dt.timedelta(c[2]) for c in coord]\r\n events.append(event)\r\n coords.append(coord)\r\n edges.append(edge)\r\n ys.append(y)\r\n xs.append(x)\r\n dates.append(date)\r\n\r\n # Flatten each list of lists\r\n events = flttn(events)\r\n coords = flttn(coords)\r\n edges = flttn(edges)\r\n ys = flttn(ys)\r\n xs = flttn(xs)\r\n dates = flttn(dates)\r\n edf = pd.DataFrame(\r\n OrderedDict({\"id\": events, \"date\": dates, \"x\": xs,\r\n \"y\": ys, \"edge\": edges, \"tile\": tile_id})\r\n )\r\n if not os.path.exists(os.path.join(self.proj_dir, \"tables/events\")):\r\n os.mkdir(os.path.join(self.proj_dir, \"tables/events\"))\r\n edf.to_csv(\r\n os.path.join(\r\n self.proj_dir, \"tables/events/\" + tile_id + \".csv\"),\r\n index=False)\r\n\r\n # Clear memory\r\n gc.collect()\r\n\r\n # Now read in the event data frames (use dask, instead, save memory)\r\n print(\"Reading saved event tables back into memory...\")\r\n efiles = glob(os.path.join(self.proj_dir, \"tables/events/*csv\"))\r\n efiles = [e for e in efiles if e[-10:-4] in self.tiles]\r\n edfs = [pd.read_csv(e) for e in efiles]\r\n\r\n # Merge with existing records\r\n print(\"Concatenating event tables...\")\r\n df = pd.concat(edfs)\r\n\r\n def toDays(date, base):\r\n date = dt.datetime.strptime(date, \"%Y-%m-%d\")\r\n delta = (date - base)\r\n days = delta.days\r\n return days\r\n\r\n print(\"Creating unique ids...\")\r\n df[\"id\"] = df[\"tile\"] + \"_\" + df[\"id\"].astype(str)\r\n\r\n print(\"Converting days since 1970 to dates...\")\r\n df[\"days\"] = df[\"date\"].apply(toDays, base=base)\r\n\r\n # Cut the edge events out into a separate df\r\n print(\"Separating tile edge events from center events...\")\r\n edges = df[df[\"edge\"] == True]\r\n not_edges = df[df[\"edge\"] == False]\r\n\r\n # Merge where needed\r\n print(\"Merging edge-case tile events...\")\r\n eids = list(edges[\"id\"].unique())\r\n for iden in tqdm(eids, position=0, file=sys.stdout):\r\n # Split, one vs all\r\n edf = edges[edges[\"id\"] == iden]\r\n edf2 = edges[edges[\"id\"] != iden]\r\n days = edf[\"days\"]\r\n\r\n # Sometimes these are empty\r\n try:\r\n d1 = min(days)\r\n d2 = max(days)\r\n except Exception:\r\n pass\r\n\r\n # If events aren't close enough in time the list will be empty\r\n edf2 = edf2[(abs(edf2[\"days\"] - d1) < self.temporal_param) | (abs(edf2[\"days\"] - d2) < self.temporal_param)]\r\n eids2 = list(edf2[\"id\"].unique())\r\n\r\n # If there are event close in time, are they close in space?\r\n for iden2 in eids2:\r\n edf2 = edges[edges[\"id\"] == iden2]\r\n ydiffs = [y - edf2[\"y\"].values for y in edf[\"y\"].values]\r\n xdiffs = [x - edf2[\"x\"].values for x in edf[\"x\"].values]\r\n ychecks = [spCheck(yds, self.sp_buf) for yds in ydiffs]\r\n xchecks = [spCheck(xds, self.sp_buf) for xds in xdiffs]\r\n checks = [ychecks[i] * xchecks[i] for i in range(len(ychecks))]\r\n if any(checks):\r\n # Merge events! Merge into the earliest event\r\n # MC - Need to replace this with the event builder on these points (?)\r\n d12 = edf2[\"days\"].min()\r\n if d1 < d12:\r\n edges[\"id\"][edges[\"id\"] == iden2] = iden\r\n else:\r\n edges[\"id\"][edges[\"id\"] == iden] = iden2\r\n\r\n # Concatenate edge df back into main df\r\n print(\"Recombining edge and center cases...\")\r\n df = pd.concat([not_edges, edges])\r\n\r\n # Reset id values in chronological order\r\n print(\"Resetting ids in chronological order..\")\r\n df[\"first\"] = df.groupby(\"id\").days.transform(\"min\")\r\n firsts = df[[\"id\", \"first\"]].drop_duplicates()\r\n firsts = firsts.sort_values(\"first\")\r\n firsts[\"new_id\"] = range(1, firsts.shape[0] + 1)\r\n idmap = dict(zip(firsts[\"id\"], firsts[\"new_id\"]))\r\n df[\"id\"] = df[\"id\"].map(idmap)\r\n df = df.sort_values(\"id\")\r\n\r\n # put these in order\r\n df = df[[\"id\", \"tile\", \"date\", \"x\", \"y\"]]\r\n\r\n # Finally save\r\n print(\"Saving merged event table to \" + self.file_name)\r\n df.to_csv(self.file_name, index=False)", "def _write_events(self):\n misc_data = {\n \"Marked\": \"Marked=0\", \"Name\": \"Speaker\", \"MarginL\": \"0\",\n \"MarginR\": \"0\", \"MarginV\": \"0\", \"Effect\": \"\",\n }\n for (name, data) in self.events.items():\n data.update(misc_data)\n line = \"Dialogue: {Marked},{Start},{End},{Style},\" \\\n \"{Name},{MarginL},{MarginR},{MarginV},{Effect},{Text}\" \\\n \"\\n\".format(Style=name, **data)\n self.Events += line", "def many_events(start_time,end_time,subevent_bools):\r\n \r\n #running through for each event\r\n for j in range(len(start_time)):\r\n \r\n #start, end, and subevent bool for this event\r\n st = start_time[j]\r\n et = end_time[j]\r\n subevent = bool(subevent_bools[j])\r\n \r\n #checking if start time is actually available\r\n if str(st) != 'nan':\r\n try:\r\n st = parse(st)\r\n yes_st = True\r\n except ValueError:\r\n yes_st = False\r\n else:\r\n yes_st = False\r\n \r\n #checking if end time is actually available\r\n if str(et) != 'nan':\r\n try:\r\n et = parse(et)\r\n yes_et = True\r\n except ValueError:\r\n yes_et = False\r\n else:\r\n yes_et = False\r\n \r\n #if both start and end times are available, running the code\r\n if yes_st and yes_et:\r\n #event must be after Nov. 2010 because currently no capability for\r\n #instruments in use before then - change this if you have that\r\n #capability\r\n if st > datetime(2010,9,1):\r\n try:\r\n print('got start and end times! running database extraction') \r\n database_extraction(st,et,instrument_chosen,subevent)\r\n except:\r\n continue\r\n else:\r\n print('cannot run for events before November 2010 because do not have '\r\n 'access to instruments before then')", "def export_state_events_as_textgrid(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Export events as TextGrid\", os.path.expanduser('~'),\n options=QFileDialog(self).ShowDirsOnly)\n if not exportDir:\n return\n\n for obsId in selectedObservations:\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = 0\nxmax = 98.38814058956916\ntiers? <exists>\nsize = {subjectNum}\nitem []:\n\"\"\"\n subjectheader = \"\"\" item [{subjectIdx}]:\n class = \"IntervalTier\"\n name = \"{subject}\"\n xmin = {intervalsMin}\n xmax = {intervalsMax}\n intervals: size = {intervalsSize}\n\"\"\"\n\n template = \"\"\" intervals [{count}]:\n xmin = {xmin}\n xmax = {xmax}\n text = \"{name}\"\n\"\"\"\n\n flagUnpairedEventFound = False\n '''TO BE REMOVED totalMediaDuration = round(self.observationTotalMediaLength(obsId), 3)'''\n totalMediaDuration = round(project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]), 3)\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n cursor.execute((\"SELECT count(distinct subject) FROM events \"\n \"WHERE observation = '{}' AND subject in ('{}') AND type = 'STATE' \").format(obsId,\n \"','\".join(\n plot_parameters[\n \"selected subjects\"])))\n subjectsNum = int(list(cursor.fetchall())[0][0])\n\n subjectsMin, subjectsMax = 0, totalMediaDuration\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = {subjectsMin}\nxmax = {subjectsMax}\ntiers? <exists>\nsize = {subjectsNum}\nitem []:\n\"\"\".format(subjectsNum=subjectsNum, subjectsMin=subjectsMin, subjectsMax=subjectsMax)\n\n subjectIdx = 0\n for subject in plot_parameters[\"selected subjects\"]:\n\n subjectIdx += 1\n\n cursor.execute(\"SELECT count(*) FROM events WHERE observation = ? AND subject = ? AND type = 'STATE' \",\n (obsId, subject))\n intervalsSize = int(list(cursor.fetchall())[0][0] / 2)\n\n intervalsMin, intervalsMax = 0, totalMediaDuration\n\n out += subjectheader\n\n cursor.execute(\n \"SELECT occurence, code FROM events WHERE observation = ? AND subject = ? AND type = 'STATE' order by occurence\",\n (obsId, subject))\n\n rows = [{\"occurence\": float2decimal(r[\"occurence\"]), \"code\": r[\"code\"]} for r in cursor.fetchall()]\n if not rows:\n continue\n\n count = 0\n\n # check if 1st behavior starts at the beginning\n\n if rows[0][\"occurence\"] > 0:\n count += 1\n out += template.format(count=count, name=\"null\", xmin=0.0, xmax=rows[0][\"occurence\"])\n\n for idx, row in enumerate(rows):\n if idx % 2 == 0:\n\n # check if events not interlacced\n if row[\"key\"] != rows[idx + 1][\"key\"]:\n QMessageBox.critical(None, programName,\n \"The events are interlaced. It is not possible to produce the Praat TextGrid file\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n return\n\n count += 1\n out += template.format(count=count, name=row[\"key\"], xmin=row[\"occurence\"],\n xmax=rows[idx + 1][\"occurence\"])\n\n # check if difference is > 0.001\n if len(rows) > idx + 2:\n if rows[idx + 2][\"occurence\"] - rows[idx + 1][\"occurence\"] > 0.001:\n\n logging.debug(\"difference: {}-{}={}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] - rows[idx + 1][\n \"occurence\"]))\n\n out += template.format(count=count + 1, name=\"null\", xmin=rows[idx + 1][\"occurence\"],\n xmax=rows[idx + 2][\"occurence\"])\n count += 1\n else:\n logging.debug(\"difference <=0.001: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n rows[idx + 2][\"occurence\"] = rows[idx + 1][\"occurence\"]\n logging.debug(\"difference after: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n\n # check if last event ends at the end of media file\n if rows[-1][\"occurence\"] < project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]):\n count += 1\n out += template.format(count=count, name=\"null\", xmin=rows[-1][\"occurence\"],\n xmax=totalMediaDuration)\n\n # add info\n out = out.format(subjectIdx=subjectIdx, subject=subject, intervalsSize=count, intervalsMin=intervalsMin,\n intervalsMax=intervalsMax)\n\n try:\n with open(\"{exportDir}{sep}{obsId}.textGrid\".format(exportDir=exportDir, sep=os.sep, obsId=obsId),\n \"w\") as f:\n f.write(out)\n\n if flagUnpairedEventFound:\n QMessageBox.warning(self, programName,\n \"Some state events are not paired. They were excluded from export\", \\\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n self.statusbar.showMessage(\"Events exported successfully\", 10000)\n\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False):\n # Start by filling all data that we know into an ordered dictionary\n first_samp = raw.first_samp\n sfreq = raw.info[\"sfreq\"]\n events = events.copy()\n events[:, 0] -= first_samp\n\n # Onset column needs to be specified in seconds\n data = OrderedDict(\n [\n (\"onset\", events[:, 0] / sfreq),\n (\"duration\", durations),\n (\"trial_type\", None),\n (\"value\", events[:, 2]),\n (\"sample\", events[:, 0]),\n ]\n )\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n data[\"trial_type\"] = [trial_type_map.get(i, \"n/a\") for i in events[:, 2]]\n else:\n del data[\"trial_type\"]\n\n _write_tsv(fname, data, overwrite)", "def _save_events_summary(self):\n for name, events in self._events.items():\n dict_events = [event.to_dict() for event in events]\n dump_data(dict_events, self._make_event_filename(name))", "def write_events(self, array_of_events, is_raw_string=False):\n\n # Open the stash file\n stash_file = self.get_file_name()\n stash_file_h = open(stash_file, 'a')\n\n # Write the header\n stash_file_h.write(self.get_header())\n\n # Write the line_breaker\n stash_file_h.write(self.LINE_BREAKER)\n stash_file_h.write(\"\\n\")\n\n if self.sourcetype is not None:\n stash_file_h.write('sourcetype=\\\"' + self.sourcetype + '\\\"')\n\n # Write out the events\n for event in array_of_events:\n\n if is_raw_string:\n stash_file_h.write(event)\n else:\n stash_file_h.write(self.event_to_string(event))\n\n stash_file_h.write(\"\\n\")\n\n # Close the file\n stash_file_h.close()\n\n # Return the file name.\n return stash_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given lists of peak fluxes for protons >10 MeV and >100 MeV, creates a boolean for whether or not each event is a subevent (doesn't cross a threshold)
def gen_subevent_bools(p_10,p_100): #list of subevent booleans subevent_bools = [] #extracting 10 MeV peak flux if it exists for j in range(len(p_10)): try: p10 = float(p_10[j]) except ValueError: p10 = 'nan' #extracting 100 MeV peak flux if it exists try: p100 = float(p_100[j]) except ValueError: p100 = 'nan' #checking if peak fluxes exist if str(p10) != 'nan' and str(p100) != 'nan': #if the peak fluxes both exist and >10 MeV is both below threshold, #subevent is true (only care about >10 bc of definition of subevent) if p10 < 10: subevent_bools.append(True) elif p10 > 10: subevent_bools.append(False) #if >10 MeV doesn't exist, subevent is true else: subevent_bools.append(True) return(subevent_bools)
[ "def filter_with_peaks(\n self,\n peak_list: list,\n both_peak_support: bool = False\n ) -> bool:\n\n start_time = time.time()\n\n num_peaks = len(peak_list)\n min_peak_value = peak_list[-1][PEAK_MAX_VALUE_INDEX]\n\n log.info(f\"Filtering {self.sample_name} {self.name} with \"\n f\"{num_peaks} peaks...\")\n log.debug(f\"Top peaks: {peak_list[:3]}\")\n log.debug(f\"Bottom peaks: {peak_list[-3:]}\")\n log.debug(f'Min peak value: {min_peak_value}')\n\n # Get the coverage of each wanted peak\n # Could be used to find the specific peaks for every loop\n index_array = np.zeros(self.size, dtype=np.uint16)\n\n if num_peaks >= MAX_USHRT:\n log.warning(f'Number of peaks: {num_peaks} is greater than max_unsigned_short: {MAX_USHRT}')\n for i in range(num_peaks):\n peak_start = peak_list[i][0]\n peak_end = peak_list[i][1]\n index_array[peak_start:peak_end] = i + 1\n\n log.debug(f'Time: {time.time() - start_time}')\n\n numb_deleted = 0\n removed_loop_lengths = []\n removed_loop_values = []\n kept_loop_lengths = []\n self.kept_indexes = []\n self.peak_indexes = [[], []]\n self.filtered_start = []\n self.filtered_end = []\n self.filtered_values = []\n self.filtered_anchors = []\n\n for i in range(self.numb_loops):\n loop_start = self.start_list[i]\n loop_end = self.end_list[i]\n loop_value = self.value_list[i]\n\n if loop_start > loop_end:\n temp_val = loop_start\n loop_start = loop_end\n loop_end = temp_val\n\n if loop_value == 0:\n continue\n\n if both_peak_support:\n to_keep = index_array[loop_start] and index_array[loop_end]\n else:\n to_keep = index_array[loop_start] or index_array[loop_end]\n\n if not to_keep:\n removed_loop_values.append(loop_value)\n numb_deleted += 1\n removed_loop_lengths.append(loop_end - loop_start)\n continue\n\n self.filtered_start.append(loop_start)\n self.filtered_end.append(loop_end)\n self.filtered_values.append(loop_value)\n self.filtered_anchors.append([self.start_anchor_list[0][i],\n self.start_anchor_list[1][i],\n self.start_list_peaks[i]])\n self.filtered_anchors.append([self.end_anchor_list[0][i],\n self.end_anchor_list[1][i],\n self.start_list_peaks[i]])\n self.kept_indexes.append(i)\n\n kept_loop_lengths.append(loop_end - loop_start)\n\n # Unused for now\n self.peak_indexes[0].append((\n peak_list[index_array[loop_start] - 1][0],\n peak_list[index_array[loop_start] - 1][1]))\n self.peak_indexes[1].append((\n peak_list[index_array[loop_end] - 1][0],\n peak_list[index_array[loop_end] - 1][1]))\n\n self.filtered_start = np.array(self.filtered_start, dtype=np.int32)\n self.filtered_end = np.array(self.filtered_end, dtype=np.int32)\n self.filtered_values = np.array(self.filtered_values)\n self.filtered_numb_values = self.filtered_start.size\n self.kept_indexes = np.array(self.kept_indexes, dtype=np.int32)\n\n log.debug(f'Total loops: {self.numb_loops}')\n log.debug(f\"Number of loops removed: {numb_deleted}\")\n log.info(f\"Number of loops kept: {self.filtered_numb_values}\")\n\n if self.filtered_numb_values == 0:\n log.warning(f\"No loops left. Skipping\")\n return False\n\n if numb_deleted > 0:\n log.debug(\n f'Avg loop length removed: {np.mean(removed_loop_lengths)}')\n log.debug(f'Avg loop value removed: {np.mean(removed_loop_values)}')\n else:\n log.debug(f'Avg loop length removed: N/A')\n log.debug(f'Avg loop value removed: N/A')\n log.debug(f'Avg loop length kept: {np.mean(kept_loop_lengths)}')\n log.debug(f'Avg loop value kept: {np.mean(self.filtered_values)}')\n log.debug(f'Largest loop value kept: {np.max(self.filtered_values)}')\n log.debug(f'Time taken: {time.time() - start_time}\\n')\n\n return True", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass", "def checkPeakListValid(self):\n \n validPeaks = 0\n \n for self.rawPeak in self.peakFile.peaks:\n \n validPeaks += self.thisPeakValid()\n \n if validPeaks:\n break\n \n return validPeaks", "def checkPeaks(peaks, nwindows):\n nvalleys = len(peaks)-2\n if nvalleys != (nwindows - 1):\n print(\"Error: number of valleys should be {}! Found {}.\".format(nwindows-1, nvalleys))\n return True # Continue looping\n else:\n print(\"OK: Found {} valleys.\".format(nvalleys))\n return False # Exit the loop", "def check_thresholds_and_peaks(v, t, spike_indexes, peak_indexes, upstroke_indexes, end=None,\n max_interval=0.005, thresh_frac=0.05, filter=10., dvdt=None):\n\n if not end:\n end = t[-1]\n\n overlaps = np.flatnonzero(spike_indexes[1:] <= peak_indexes[:-1])\n if overlaps.size:\n spike_mask = np.ones_like(spike_indexes, dtype=bool)\n spike_mask[overlaps + 1] = False\n spike_indexes = spike_indexes[spike_mask]\n\n peak_mask = np.ones_like(peak_indexes, dtype=bool)\n peak_mask[overlaps] = False\n peak_indexes = peak_indexes[peak_mask]\n\n upstroke_mask = np.ones_like(upstroke_indexes, dtype=bool)\n upstroke_mask[overlaps] = False\n upstroke_indexes = upstroke_indexes[upstroke_mask]\n\n # Validate that peaks don't occur too long after the threshold\n # If they do, try to re-find threshold from the peak\n too_long_spikes = []\n for i, (spk, peak) in enumerate(zip(spike_indexes, peak_indexes)):\n if t[peak] - t[spk] >= max_interval:\n logging.info(\"Need to recalculate threshold-peak pair that exceeds maximum allowed interval ({:f} s)\".format(max_interval))\n too_long_spikes.append(i)\n\n if too_long_spikes:\n if dvdt is None:\n dvdt = calculate_dvdt(v, t, filter)\n avg_upstroke = dvdt[upstroke_indexes].mean()\n target = avg_upstroke * thresh_frac\n drop_spikes = []\n for i in too_long_spikes:\n # First guessing that threshold is wrong and peak is right\n peak = peak_indexes[i]\n t_0 = find_time_index(t, t[peak] - max_interval)\n below_target = np.flatnonzero(dvdt[upstroke_indexes[i]:t_0:-1] <= target)\n if not below_target.size:\n # Now try to see if threshold was right but peak was wrong\n\n # Find the peak in a window twice the size of our allowed window\n spike = spike_indexes[i]\n t_0 = find_time_index(t, t[spike] + 2 * max_interval)\n new_peak = np.argmax(v[spike:t_0]) + spike\n\n # If that peak is okay (not outside the allowed window, not past the next spike)\n # then keep it\n if t[new_peak] - t[spike] < max_interval and \\\n (i == len(spike_indexes) - 1 or t[new_peak] < t[spike_indexes[i + 1]]):\n peak_indexes[i] = new_peak\n else:\n # Otherwise, log and get rid of the spike\n logging.info(\"Could not redetermine threshold-peak pair - dropping that pair\")\n drop_spikes.append(i)\n# raise FeatureError(\"Could not redetermine threshold\")\n else:\n spike_indexes[i] = upstroke_indexes[i] - below_target[0]\n\n\n if drop_spikes:\n spike_indexes = np.delete(spike_indexes, drop_spikes)\n peak_indexes = np.delete(peak_indexes, drop_spikes)\n upstroke_indexes = np.delete(upstroke_indexes, drop_spikes)\n\n # Check that last spike was not cut off too early by end of stimulus\n # by checking that the membrane potential returned to at least the threshold\n # voltage - otherwise, drop it\n clipped = np.zeros_like(spike_indexes, dtype=bool)\n end_index = find_time_index(t, end)\n if len(spike_indexes) > 0 and not np.any(v[peak_indexes[-1]:end_index + 1] <= v[spike_indexes[-1]]):\n logging.debug(\"Failed to return to threshold voltage (%f) after last spike (min %f) - marking last spike as clipped\", v[spike_indexes[-1]], v[peak_indexes[-1]:end_index + 1].min())\n clipped[-1] = True\n\n return spike_indexes, peak_indexes, upstroke_indexes, clipped", "def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)", "def test_peakDetect():\n from peakDetection import peakDetect\n from readCSV import readCsv\n from processECG import movingAverage\n from processECG import subtractDC\n [t, v] = readCsv(r'test_data1.csv')\n svf = subtractDC(v)\n invfssum = 0.0\n for i in range(len(t) - 1):\n invfssum = invfssum + (t[i + 1] - t[i])\n invfs = invfssum / (len(t) - 1)\n movingavgwindow = int(.085 / invfs)\n avgvolt = movingAverage(t, svf, movingavgwindow)\n peaks = peakDetect(t, avgvolt)\n correctbeatnum = 36\n assert len(peaks) == correctbeatnum", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def isPeakAssigned(peak, fully=True):\n\n n = 0\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) > 0:\n n +=1\n \n if n == len(peak.peakDims):\n return True\n \n elif n > 0:\n if fully:\n return False\n else:\n return True\n \n else:\n return False", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def contains_subsamples(self) -> bool:\n return False", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def onset_backtrack(events: np.ndarray, energy: np.ndarray) -> np.ndarray:\n # Find points where energy is non-increasing\n # all points: energy[i] <= energy[i-1]\n # tail points: energy[i] < energy[i+1]\n minima = np.flatnonzero((energy[1:-1] <= energy[:-2]) & (energy[1:-1] < energy[2:]))\n\n # Pad on a 0, just in case we have onsets with no preceding minimum\n # Shift by one to account for slicing in minima detection\n minima = util.fix_frames(1 + minima, x_min=0)\n\n # Only match going left from the detected events\n results: np.ndarray = minima[util.match_events(events, minima, right=False)]\n return results", "def test_peak_refinement(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, px) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_i, pmag_i, pph_i = sample_dsp.peak_refine(ploc, mx, px) # pylint: disable=W0632\n ploc_i_sms, pmag_i_sms, pph_i_sms = utilFunctions.peakInterp(mx, px, ploc)\n with self.subTest(frame=i, value=\"location\"):\n self.assert_almost_equal_rmse(ploc_i, ploc_i_sms)\n with self.subTest(frame=i, value=\"magnitude\"):\n self.assert_almost_equal_rmse(pmag_i, pmag_i_sms)\n with self.subTest(frame=i, value=\"phase\"):\n self.assert_almost_equal_rmse(pph_i, pph_i_sms)", "def isPossibleSubsumer(self):\n if self.action_cnt > cons.theta_sub and self.error < cons.err_sub: #self.prediction < cons.err_sub: (why does it work?)\n return True\n return False", "def is_subset_of(self, uspec):\n \n if self.is_power_onoff() or uspec.is_power_onoff():\n return False\n \n if (uspec.is_bias() or not uspec.is_calib()) and self['speed'] != uspec['speed']:\n return False\n\n if int(self['x_bin']) % int(uspec['x_bin']) != 0 or int(self['y_bin']) % int(uspec['y_bin']) != 0:\n return False\n\n if self.number_windows() > 0:\n\n if not uspec.contains_window(self['x1_start'], self['y1_start'], self['x1_size'], self['y1_size'], self['x_bin'], self['y_bin']):\n return False\n\n if self.number_windows() > 1:\n\n if not uspec.contains_window(self['x2_start'], self['y2_start'], self['x2_size'], self['y2_size'], self['x_bin'], self['y_bin']):\n return False\n\n return True", "def test_large_peaks(self):\n\n \n \n data = [1,1,2,1,1,1,1,1,1,1] #len of 10\n xvals = arange(len(data))\n fitter = Classic(xvals, data, max_width=5, min_width=3, max_gap=10)\n \n \n starts_and_stops = fitter.peaks(False)\n self.assertListEqual(starts_and_stops, [(0,5, 2), (5,10, 5)])", "def findPeaks(spec,numberOfPeaks,verbose = False):#{{{\n print \"'findPeaks()' DEPRECIATED use 'findPeaksSequential()'\"\n peaks = []\n valleys = []\n hrf = linspace(spec.getaxis('field').min(),spec.getaxis('field').max(),10000)\n smash = spec.copy().interp('field',hrf).runcopy(real) # use an interpolated higher res spec to get a more accurate esitmate of linewidth\n hrs = smash.copy()\n #smash -= average(spec.data)\n for i in range(numberOfPeaks): \n peak = smash.data.argmax()\n peaks.append(peak)\n valley = smash.data.argmin()\n valleys.append(valley)\n # remove from peak\n #find the high bound\n notCrossed=True\n count = 0\n dimSize = len(smash.data)\n while notCrossed:\n if peak + count <= 0:\n lowBound = peak+count\n notCrossed = False\n else:\n if float(smash['field',peak+count].data) <= 0.0:\n lowBound = peak+count\n notCrossed = False\n count-=1\n # find the low bound\n notCrossed=True\n count=0\n while notCrossed:\n if peak + count >= dimSize: # check to make sure you haven't wandered off the spectrum\n highBound = peak+count\n notCrossed = False\n else:\n if float(smash['field',peak+count].data) <= 0.0:\n highBound = peak+count\n notCrossed = False\n count+=1\n smash['field',lowBound:highBound] = 0.0\n\n # remove from valley\n #find the high bound\n notCrossed=True\n count = 0\n while notCrossed:\n if valley + count <= 0:\n lowBound = valley+count\n notCrossed = False\n else:\n if float(smash['field',valley+count].data) >= 0.0:\n lowBound = valley+count\n notCrossed = False\n count-=1\n # find the low bound\n notCrossed=True\n count=0\n while notCrossed:\n if valley + count >= dimSize: # check to make sure you haven't wandered off the spectrum\n highBound = valley+count\n notCrossed = False\n else:\n if float(smash['field',valley+count].data) >= 0.0:\n highBound = valley+count\n notCrossed = False\n count+=1\n smash['field',lowBound:highBound] = 0.0\n if verbose:\n pys.plot(smash)\n peak = pys.nddata(hrs.data[peaks]).rename('value','field').labels('field',hrs.getaxis('field')[peaks])\n valley = pys.nddata(hrs.data[valleys]).rename('value','field').labels('field',hrs.getaxis('field')[valleys])\n # Calculate relevant parameters\n peak.sort('field')\n valley.sort('field')\n return peak,valley", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes in lists of start times and end times to create a list of time windows, and a list of whether or not an event is a subevent, and uses those lists to run functions that extract data from the GOES database. Each list must have the same length, and indices of lists must correspond (ie start_time[j] has an end time of end_time[j] and its subevent boolean is subevent_bools[j]). not to be confused with multi_events, which generates output given multiple events within one time window.
def many_events(start_time,end_time,subevent_bools): #running through for each event for j in range(len(start_time)): #start, end, and subevent bool for this event st = start_time[j] et = end_time[j] subevent = bool(subevent_bools[j]) #checking if start time is actually available if str(st) != 'nan': try: st = parse(st) yes_st = True except ValueError: yes_st = False else: yes_st = False #checking if end time is actually available if str(et) != 'nan': try: et = parse(et) yes_et = True except ValueError: yes_et = False else: yes_et = False #if both start and end times are available, running the code if yes_st and yes_et: #event must be after Nov. 2010 because currently no capability for #instruments in use before then - change this if you have that #capability if st > datetime(2010,9,1): try: print('got start and end times! running database extraction') database_extraction(st,et,instrument_chosen,subevent) except: continue else: print('cannot run for events before November 2010 because do not have ' 'access to instruments before then')
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def compile_chrono_events(\n test_scenario: SimulationTestScenario, setup_events: List[SimulationEvent]\n) -> Tuple[List[SimulationEvent], Tuple[str, datetime]]:\n previous_subtest_last_event_ts = datetime.min.replace(tzinfo=timezone.utc)\n previous_subtest_last_assertion_ts = datetime.min.replace(tzinfo=timezone.utc)\n current_subtest_first_event_ts = datetime.max.replace(tzinfo=timezone.utc)\n current_subtest_first_assertion_ts = datetime.max.replace(tzinfo=timezone.utc)\n assertion_ts = []\n events = []\n derived_param_outputs = []\n\n for sub_test in test_scenario.sub_tests:\n if sub_test.events:\n current_subtest_first_event_ts = sub_test.events[0].time\n\n if current_subtest_first_event_ts < previous_subtest_last_event_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"event timestamp before the previous one.\"\n )\n\n previous_subtest_last_event_ts = sub_test.events[-1].time\n events.extend(sub_test.events)\n\n if sub_test.expected_balances_at_ts:\n assertion_ts.extend(sub_test.expected_balances_at_ts.keys())\n\n if sub_test.expected_posting_rejections:\n assertion_ts.extend(\n expected_rejection.timestamp\n for expected_rejection in sub_test.expected_posting_rejections\n )\n if sub_test.expected_schedules:\n assertion_ts.extend(\n runtime\n for expected_schedule in sub_test.expected_schedules\n for runtime in expected_schedule.run_times\n )\n if sub_test.expected_workflows:\n assertion_ts.extend(\n runtime\n for expected_workflow in sub_test.expected_workflows\n for runtime in expected_workflow.run_times\n )\n\n if sub_test.expected_derived_parameters:\n for expected_derived_param in sub_test.expected_derived_parameters:\n assertion_ts.append(expected_derived_param.timestamp)\n derived_param_outputs.append(\n (\n expected_derived_param.account_id,\n expected_derived_param.timestamp,\n )\n )\n\n if assertion_ts:\n sorted_assertion_ts = sorted(assertion_ts)\n current_subtest_first_assertion_ts = sorted_assertion_ts[0]\n\n if current_subtest_first_assertion_ts < previous_subtest_last_assertion_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"assertion timestamp before the previous one.\"\n )\n\n previous_subtest_last_assertion_ts = sorted_assertion_ts[-1]\n assertion_ts.clear()\n\n if (\n previous_subtest_last_event_ts > test_scenario.end\n or previous_subtest_last_assertion_ts > test_scenario.end\n ):\n log.warning(\"last assertion or event happens outside of simulation window\")\n\n if setup_events and events and setup_events[-1].time > events[0].time:\n raise ValueError(\n f\"First custom event at {events[0].time}, it needs to be after \"\n f\"{setup_events[-1].time}, when account and plan setup events are complete\"\n )\n\n return setup_events + events, derived_param_outputs", "def subdivide(events: list, splits: list) -> list:\n formatted_events = []\n\n for i in range(len(splits)-1):\n formatted_event = {}\n formatted_event[\"temporalRange\"] = [splits[i], splits[i + 1]]\n # Get the events of the enclosing event (it must always be one)\n for event in events:\n tr = event[\"temporalRange\"]\n if tr[0] <= splits[i] and tr[1] >= splits[i+1]:\n formatted_event[\"events\"] = event[\"events\"]\n # Adding the formatted event to the return result\n formatted_events.append(formatted_event)\n\n return formatted_events", "def extract_events(trajectories, traj_idx=0, cumulative_events_description=[]):\n\n def find_last_event(events, event_type, player_num, last_timestep=None):\n for event in reversed(events):\n if event.get(\"player\") == player_num and event.get(\"action\") == event_type \\\n and (last_timestep is None or event[\"timestep\"] < last_timestep):\n return event\n \n def holding_event(events, player_num, timestep, obj):\n last_pickup_t = (find_last_event(events, \"pickup\", player_num, timestep) or {}).get(\"timestep\")\n\n start_t = last_pickup_t or 0\n event = {\n \"player\": player_num,\n \"start_timestep\":start_t,\n \"end_timestep\":timestep,\n \"action\": \"holding\",\n \"object\": obj\n }\n return event\n\n def get_holding_events(events, game_states):\n holding_events = [holding_event(events, event.get(\"player\"), event.get(\"timestep\"), event.get(\"object\")) \n for event in events if event.get(\"action\") in [\"drop\", \"potting\", \"delivery\"]]\n # check if object is held at end of the episode\n for player_num, player in enumerate(game_states[-1].players):\n if player.held_object:\n holding_events.append(holding_event(events, player_num, len(game_states)-1, player.held_object.to_dict()))\n return holding_events\n\n def end_of_episode_event(game_states):\n # event used for determining size of x axis\n event = {\n \"timestep\": len(game_states)-1,\n \"action\": \"end_of_episode\"\n }\n return event\n\n def get_cumulative_events(events, last_timestep, cumulative_events_description):\n \"\"\"\n Receives events for scatter plot and returns events for cumulative plot\n \"\"\"\n def add_cumulative_event(events, events_sum, timestep, actions=None, adjectives=None, player_num=None, obj=None):\n event = {\"sum\": events_sum,\n \"timestep\": timestep}\n\n if actions is not None: event[\"actions\"] = actions\n if adjectives is not None: event[\"adjectives\"] = adjectives\n if player_num is not None: event[\"player\"] = player_num\n if obj is not None: event[\"object\"] = obj\n events.append(event)\n \n def is_matching_adjectives(event, adjectives):\n event_adjectives = event.get(\"adjectives\", [])\n if not adjectives: # assuming that there was no supplied adjectives because all are allowed\n return True\n no_adjectives_allowed = None in adjectives or \"\" in adjectives\n if no_adjectives_allowed and not event_adjectives:\n return True\n return bool(set(event_adjectives).intersection(adjectives))\n\n def is_matching_actions(event, actions):\n if not actions: # assuming that there was no supplied actions because all are allowed\n return True\n return event.get(\"action\") in actions\n \n cumulative_events = []\n\n for description in cumulative_events_description:\n actions = description.get(\"actions\")\n adjectives = description.get(\"adjectives\")\n all_events_sum = 0\n player_sums = defaultdict(int)\n # add zero timestep events\n first_timestep = 0\n add_cumulative_event(cumulative_events, all_events_sum, first_timestep, actions, adjectives)\n for player_num in set(map(lambda e: e[\"player\"], events)):\n add_cumulative_event(cumulative_events, all_events_sum, first_timestep, actions, adjectives, player_num)\n\n for event in filter(lambda e: is_matching_actions(e, actions) and is_matching_adjectives(e, adjectives), events):\n player_num = event[\"player\"]\n timestep = event[\"timestep\"]\n all_events_sum +=1\n player_sums[player_num] +=1\n add_cumulative_event(cumulative_events, all_events_sum, timestep, actions, adjectives)\n add_cumulative_event(cumulative_events, player_sums[player_num], timestep, actions, adjectives, player_num)\n\n # add cumulative events at last timestep for graph ending with the last timestep\n add_cumulative_event(cumulative_events, all_events_sum, last_timestep, actions, adjectives)\n for player_num in set(map(lambda x: x[\"player\"], events)):\n add_cumulative_event(cumulative_events, player_sums[player_num], last_timestep, actions, adjectives, player_num)\n return cumulative_events\n \n ep_states = trajectories[\"ep_states\"][traj_idx]\n events = copy.deepcopy(trajectories[\"ep_infos\"][0][-1][\"episode\"][\"events_list\"])\n \n events += get_holding_events(events, ep_states)\n\n if cumulative_events_description:\n events += get_cumulative_events(events, len(ep_states), cumulative_events_description)\n\n events.append(end_of_episode_event(ep_states))\n # clearing possible numpy data types from data to allow json.dumps of the data\n events = [{k:numpy_to_native(v) for k,v in event.items()} for event in events]\n \n return events", "def get_event_time_boundaries(event, performances):\n performances.sort(key=lambda p: p['start_time'])\n event_date = event['start_date']\n if performances:\n start_dt = timezone.datetime.combine(event_date, performances[0]['start_time'])\n end_dt = timezone.datetime.combine(event_date, performances[-1]['end_time'])\n else:\n start_dt = timezone.datetime.combine(event_date, datetime.time())\n start_dt = start_dt.replace(tzinfo=timezone.utc)\n end_dt = start_dt.replace(hour=23, minute=59, second=59)\n return start_dt, end_dt", "def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]", "def parseEvents(data, times, eventTimes):\n striped = []\n remaining = range(len(times))\n stripedEvents = []\n\n for t in eventTimes:\n tmpEvent = t.date()\n for j in range(len(times)):\n tmpTime = times[j].date()\n\n if tmpEvent == tmpTime:\n striped.append(tmpEvent)\n stripedEvents.append(data[j, :])\n remaining.remove(j)\n break\n\n stripedEvents = np.array(stripedEvents)\n remainingTimes = np.array(remaining)\n stripedTimes = np.array(striped)\n remainingEvents = data[remaining]\n\n return stripedTimes, remainingTimes, stripedEvents, remainingEvents", "async def test_overlap_events(\n hass: HomeAssistant,\n calls: Callable[[], list[dict[str, Any]]],\n fake_schedule: FakeSchedule,\n) -> None:\n\n event_data1 = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 11:00:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:30:00+00:00\"),\n )\n event_data2 = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 11:15:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:45:00+00:00\"),\n )\n async with create_automation(hass, EVENT_START):\n await fake_schedule.fire_until(\n datetime.datetime.fromisoformat(\"2022-04-19 11:20:00+00:00\")\n )\n\n assert calls() == [\n {\n \"platform\": \"calendar\",\n \"event\": EVENT_START,\n \"calendar_event\": event_data1,\n },\n {\n \"platform\": \"calendar\",\n \"event\": EVENT_START,\n \"calendar_event\": event_data2,\n },\n ]", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def overlap_events(event1, event2, place1, place2, log_places):\n place1.start_event(event1)\n log_conflicts(event1.start_time, log_places)\n place2.start_event(event2)\n log_conflicts(event2.start_time, log_places)\n place1.end_event(event1)\n log_conflicts(event1.end_time, log_places)\n place2.end_event(event2)\n log_conflicts(event2.end_time, log_places)", "def buildEvents(self):\r\n # Make sure the destination folder exists\r\n if not(os.path.exists(os.path.dirname(self.file_name))):\r\n print(self.file_name)\r\n os.makedirs(os.path.dirname(self.file_name))\r\n\r\n # Create an empty list and data frame for the\r\n tile_list = []\r\n columns = [\"id\", \"date\", \"x\", \"y\", \"edge\", \"tile\"]\r\n df = pd.DataFrame(columns=columns)\r\n base = dt.datetime(1970, 1, 1)\r\n\r\n # Loop through each netcdf file and build individual tile events\r\n for file in self.files:\r\n tile_id = file[-9:-3]\r\n if os.path.exists(\r\n os.path.join(\r\n self.proj_dir, \"tables/events/\" + tile_id + \".csv\")\r\n ):\r\n print(tile_id + \" event table exists, skipping...\")\r\n elif not os.path.exists(\r\n os.path.join(self.proj_dir, \"rasters/burn_area/netcdfs/\" + tile_id + \".nc\")):\r\n pass\r\n else:\r\n print(\"\\n\" + tile_id)\r\n\r\n # Create a new event object\r\n builder = EventGrid(nc_path=file,\r\n proj_dir=self.proj_dir,\r\n spatial_param=self.spatial_param,\r\n temporal_param=self.temporal_param)\r\n\r\n # Classify event perimeters\r\n perims = builder.get_event_perimeters()\r\n\r\n # Remove empty perimeters\r\n perims = [p for p in perims if type(p.coords[0]) is not str]\r\n tile_list.append(perims)\r\n\r\n # Extract just the event ID, days, and x,y MODIS coordinates\r\n plist = [(p.get_event_id(), p.coords) for p in perims]\r\n\r\n # Identify edge cases, so either x or y is within 5 cells\r\n # make in if statement to not do this if there's only one tile\r\n maxys = builder.data_set[\"y\"].data[:builder.spatial_param]\r\n minys = builder.data_set[\"y\"].data[-builder.spatial_param:]\r\n maxxs = builder.data_set[\"x\"].data[-builder.spatial_param:]\r\n minxs = builder.data_set[\"x\"].data[:builder.spatial_param]\r\n yedges = list(maxys) + list(minys)\r\n xedges = list(maxxs) + list(minxs)\r\n\r\n # Create an empty data frame\r\n print(\"Building data frame...\")\r\n events = []\r\n coords = []\r\n edges = []\r\n ys = []\r\n xs = []\r\n dates = []\r\n for p in plist:\r\n coord = [list(c) for c in p[1]]\r\n # if(len(tile_list>1)): edge = [edgeCheck(yedges, xedges, c, self.sp_buf) for c in coord]\r\n # else: edge = 0\r\n edge = [edgeCheck(yedges, xedges, c, self.sp_buf) for c in coord]\r\n if any(edge):\r\n edge = [True for e in edge]\r\n event = list(np.repeat(p[0], len(coord)))\r\n y = [c[0] for c in coord]\r\n x = [c[1] for c in coord]\r\n date = [base + dt.timedelta(c[2]) for c in coord]\r\n events.append(event)\r\n coords.append(coord)\r\n edges.append(edge)\r\n ys.append(y)\r\n xs.append(x)\r\n dates.append(date)\r\n\r\n # Flatten each list of lists\r\n events = flttn(events)\r\n coords = flttn(coords)\r\n edges = flttn(edges)\r\n ys = flttn(ys)\r\n xs = flttn(xs)\r\n dates = flttn(dates)\r\n edf = pd.DataFrame(\r\n OrderedDict({\"id\": events, \"date\": dates, \"x\": xs,\r\n \"y\": ys, \"edge\": edges, \"tile\": tile_id})\r\n )\r\n if not os.path.exists(os.path.join(self.proj_dir, \"tables/events\")):\r\n os.mkdir(os.path.join(self.proj_dir, \"tables/events\"))\r\n edf.to_csv(\r\n os.path.join(\r\n self.proj_dir, \"tables/events/\" + tile_id + \".csv\"),\r\n index=False)\r\n\r\n # Clear memory\r\n gc.collect()\r\n\r\n # Now read in the event data frames (use dask, instead, save memory)\r\n print(\"Reading saved event tables back into memory...\")\r\n efiles = glob(os.path.join(self.proj_dir, \"tables/events/*csv\"))\r\n efiles = [e for e in efiles if e[-10:-4] in self.tiles]\r\n edfs = [pd.read_csv(e) for e in efiles]\r\n\r\n # Merge with existing records\r\n print(\"Concatenating event tables...\")\r\n df = pd.concat(edfs)\r\n\r\n def toDays(date, base):\r\n date = dt.datetime.strptime(date, \"%Y-%m-%d\")\r\n delta = (date - base)\r\n days = delta.days\r\n return days\r\n\r\n print(\"Creating unique ids...\")\r\n df[\"id\"] = df[\"tile\"] + \"_\" + df[\"id\"].astype(str)\r\n\r\n print(\"Converting days since 1970 to dates...\")\r\n df[\"days\"] = df[\"date\"].apply(toDays, base=base)\r\n\r\n # Cut the edge events out into a separate df\r\n print(\"Separating tile edge events from center events...\")\r\n edges = df[df[\"edge\"] == True]\r\n not_edges = df[df[\"edge\"] == False]\r\n\r\n # Merge where needed\r\n print(\"Merging edge-case tile events...\")\r\n eids = list(edges[\"id\"].unique())\r\n for iden in tqdm(eids, position=0, file=sys.stdout):\r\n # Split, one vs all\r\n edf = edges[edges[\"id\"] == iden]\r\n edf2 = edges[edges[\"id\"] != iden]\r\n days = edf[\"days\"]\r\n\r\n # Sometimes these are empty\r\n try:\r\n d1 = min(days)\r\n d2 = max(days)\r\n except Exception:\r\n pass\r\n\r\n # If events aren't close enough in time the list will be empty\r\n edf2 = edf2[(abs(edf2[\"days\"] - d1) < self.temporal_param) | (abs(edf2[\"days\"] - d2) < self.temporal_param)]\r\n eids2 = list(edf2[\"id\"].unique())\r\n\r\n # If there are event close in time, are they close in space?\r\n for iden2 in eids2:\r\n edf2 = edges[edges[\"id\"] == iden2]\r\n ydiffs = [y - edf2[\"y\"].values for y in edf[\"y\"].values]\r\n xdiffs = [x - edf2[\"x\"].values for x in edf[\"x\"].values]\r\n ychecks = [spCheck(yds, self.sp_buf) for yds in ydiffs]\r\n xchecks = [spCheck(xds, self.sp_buf) for xds in xdiffs]\r\n checks = [ychecks[i] * xchecks[i] for i in range(len(ychecks))]\r\n if any(checks):\r\n # Merge events! Merge into the earliest event\r\n # MC - Need to replace this with the event builder on these points (?)\r\n d12 = edf2[\"days\"].min()\r\n if d1 < d12:\r\n edges[\"id\"][edges[\"id\"] == iden2] = iden\r\n else:\r\n edges[\"id\"][edges[\"id\"] == iden] = iden2\r\n\r\n # Concatenate edge df back into main df\r\n print(\"Recombining edge and center cases...\")\r\n df = pd.concat([not_edges, edges])\r\n\r\n # Reset id values in chronological order\r\n print(\"Resetting ids in chronological order..\")\r\n df[\"first\"] = df.groupby(\"id\").days.transform(\"min\")\r\n firsts = df[[\"id\", \"first\"]].drop_duplicates()\r\n firsts = firsts.sort_values(\"first\")\r\n firsts[\"new_id\"] = range(1, firsts.shape[0] + 1)\r\n idmap = dict(zip(firsts[\"id\"], firsts[\"new_id\"]))\r\n df[\"id\"] = df[\"id\"].map(idmap)\r\n df = df.sort_values(\"id\")\r\n\r\n # put these in order\r\n df = df[[\"id\", \"tile\", \"date\", \"x\", \"y\"]]\r\n\r\n # Finally save\r\n print(\"Saving merged event table to \" + self.file_name)\r\n df.to_csv(self.file_name, index=False)", "def time(self,orid_time,window=5):\n #{{{ Function to get possible matches of events for some epoch time.\n\n results = {}\n\n #\n # If running in simple mode we don't have access to the tables we need\n #\n if config.simple:\n return results\n\n orid_time = _isNumber(orid_time)\n\n if not orid_time:\n print \"Not a valid number in function call: %s\" % orid_time\n return\n \n start = float(orid_time)-float(window)\n end = float(orid_time)+float(window)\n\n dbname = self.dbcentral(orid_time)\n\n if not db:\n print \"No match for orid_time in dbcentral object: (%s,%s)\" % (orid_time,self.dbcentral(orid_time))\n return\n\n try: \n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='origin')\n db.query(datascope.dbTABLE_PRESENT) \n except Exception,e:\n print \"Exception on Events() time(%s): Error on db pointer %s [%s]\" % (orid_time,db,e)\n return\n\n db.subset( 'time >= %f' % start )\n db.subset( 'time <= %f' % end )\n\n try:\n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='wfdisc' )\n records = db.query(datascope.dbRECORD_COUNT)\n\n except:\n records = 0\n\n if records:\n\n for i in range(records):\n\n db.record = i\n\n (orid,time) = db.getv('orid','time')\n\n orid = _isNumber(orid)\n time = _isNumber(time)\n results[orid] = time\n\n return results", "def _add_time_events(self, events: List[Event]) -> List[Event]:\n\n # Add time events\n all_events = []\n previous_tick = 0\n previous_note_end = 0\n for e, event in enumerate(events):\n # No time shift\n if event.time != previous_tick:\n # (Rest)\n if (\n self.config.use_rests\n and event.time - previous_note_end >= self._min_rest\n ):\n previous_tick = previous_note_end\n rest_values = self._ticks_to_duration_tokens(\n event.time - previous_tick, rest=True\n )\n for dur_value, dur_ticks in zip(*rest_values):\n all_events.append(\n Event(\n type=\"Rest\",\n value=\".\".join(map(str, dur_value)),\n time=previous_tick,\n desc=f\"{event.time - previous_tick} ticks\",\n )\n )\n previous_tick += dur_ticks\n\n # Time shift\n # no else here as previous might have changed with rests\n if event.time != previous_tick:\n time_shift = event.time - previous_tick\n for dur_value, dur_ticks in zip(\n *self._ticks_to_duration_tokens(time_shift)\n ):\n all_events.append(\n Event(\n type=\"TimeShift\",\n value=\".\".join(map(str, dur_value)),\n time=previous_tick,\n desc=f\"{time_shift} ticks\",\n )\n )\n previous_tick += dur_ticks\n previous_tick = event.time\n\n all_events.append(event)\n\n # Update max offset time of the notes encountered\n if event.type in [\"NoteOn\"]:\n previous_note_end = max(previous_note_end, event.desc)\n elif event.type in [\n \"Program\",\n \"Tempo\",\n \"Pedal\",\n \"PedalOff\",\n \"PitchBend\",\n \"Chord\",\n ]:\n previous_note_end = max(previous_note_end, event.time)\n\n return all_events", "def build_timings(events):\n\n stack = []\n timings = []\n for e in events:\n if e.type == 'START':\n stack.append(e)\n elif e.type == 'FINISH':\n prev = stack.pop()\n if prev.step != e.step:\n raise Exception(\n \"\"\"I have a FINISH event for the START event of a\n different step\"\"\")\n yield Proc(e.step, prev.timestamp, e.timestamp, e.job)", "async def test_multiple_start_events(\n hass: HomeAssistant,\n calls: Callable[[], list[dict[str, Any]]],\n fake_schedule: FakeSchedule,\n) -> None:\n\n event_data1 = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 10:45:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:00:00+00:00\"),\n )\n event_data2 = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 11:00:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:15:00+00:00\"),\n )\n async with create_automation(hass, EVENT_START):\n await fake_schedule.fire_until(\n datetime.datetime.fromisoformat(\"2022-04-19 11:30:00+00:00\")\n )\n assert calls() == [\n {\n \"platform\": \"calendar\",\n \"event\": EVENT_START,\n \"calendar_event\": event_data1,\n },\n {\n \"platform\": \"calendar\",\n \"event\": EVENT_START,\n \"calendar_event\": event_data2,\n },\n ]", "def get_game_time_events_window(game, start_minute=None, end_minute=None):\n queryset = get_game_time_events(game) \n\n #Handle cases where start min could be a stoppage time; always include stoppage timeEvents\n if start_minute == -2:\n raise Exception(\"The start of a time window shouldn't be second half stoppage\")\n elif start_minute == -1:\n approx_start = 45.1\n queryset = queryset.filter(Q(minute__gt=approx_start) | Q(minute__lt=0))\n elif start_minute >= 0:\n queryset = queryset.filter(Q(minute__gt=start_minute) | Q(minute__lt=0))\n\n #Handle cases where end min could be a stoppage time; always include stoppage timeEvents\n if end_minute == -1:\n approx_end = 45.1\n queryset = queryset.filter(Q(minute__lte=approx_end) | Q(minute__lt=0))\n elif end_minute == -2:\n approx_end = 90.1\n queryset = queryset.filter(Q(minute__lte=approx_end)) \n elif end_minute >= 0:\n queryset = queryset.filter(Q(minute__lte=end_minute) | Q(minute__lt=0))\n \n # NOW decide whether or not to kick out stoppage events\n if (not _include_first_half_stoppage(start_minute, end_minute)):\n queryset = queryset.exclude(\n minute=TimeEvent.FIRST_HALF_EXTRA_TIME)\n if (not _include_second_half_stoppage(start_minute, end_minute)):\n queryset = queryset.exclude(\n minute=TimeEvent.SECOND_HALF_EXTRA_TIME)\n\n return queryset", "def find_runs(phys_in, ntp_list, tr_list, thr=None, padding=9):\n # Initialize dictionaries to save run timestamps and phys_in attributes\n run_timestamps = {}\n\n # Express the padding in samples equivalent\n padding = padding * phys_in.freq[0]\n\n # enumerate user input num_timepoints_expected\n for run_idx, run_tps in enumerate(ntp_list):\n\n # correct time offset for this iteration's object\n phys_in.check_trigger_amount(thr=thr, num_timepoints_expected=run_tps,\n tr=tr_list[run_idx])\n # If it's the very first run, start the run at sample 0,\n # otherwise start is first trigger (adjust with padding later)\n if run_idx == 0:\n run_start = 0\n else:\n run_start = int(np.where(np.isclose(phys_in.timeseries[0], 0))[0])\n\n # Defining end of acquisition\n # run length in seconds\n end_sec = run_tps * tr_list[run_idx]\n\n # define index of the run's last trigger + padding (HAS TO BE INT type)\n # pick first value of time array that is over specified run length\n # where returns list of values over end_sec and its dtype, choose [list][first value]\n run_end = int(np.where(phys_in.timeseries[0] > end_sec)[0][0] + padding)\n update = int(run_end - padding + 1)\n\n # if the padding is too much for the remaining timeseries length\n # then the padding stops at the end of recording\n if phys_in.timeseries[0].shape[0] < run_end:\n run_end = phys_in.timeseries[0].shape[0]\n\n # Adjust timestamps with previous end_index\n # Except if it's the first run\n if run_idx > 0:\n previous_end_index = run_timestamps[run_idx][1]\n # adjust time_offset to keep original timing information\n phys_in.time_offset = phys_in.time_offset + run_timestamps[run_idx][2]\n # update run_start, removing 2 paddings (one for this run, one for the previous)\n run_start = int(run_start + previous_end_index - 2 * padding)\n # update run_end, removing the padding of the previous end\n run_end = int(run_end + previous_end_index - padding)\n\n # Save *start* and *end_index* in dictionary along with *time_offset* and *ntp found*\n # dict key must be readable by human\n # LGRinfo\n LGR.info('\\n--------------------------------------------------------------\\n'\n f'Slicing between {(run_start/phys_in.freq[phys_in.trigger_idx])} seconds and '\n f'{run_end/phys_in.freq[phys_in.trigger_idx]} seconds\\n'\n '--------------------------------------------------------------')\n\n run_timestamps[run_idx + 1] = (run_start, run_end,\n phys_in.time_offset,\n phys_in.num_timepoints_found)\n\n # update the object so that next iteration will look for the first trigger\n # after previous run's last trigger. maybe padding extends to next run\n phys_in = deepcopy(phys_in[update:-1])\n\n return run_timestamps", "def getTrialsLists_fromContinuous(Evts, T0Codes, stimCodes, respCodes, \\\r\n onsetCodes=['onset'], offsetCodes=['offset'], \\\r\n otherCodes=[], endCodes=[],\\\r\n minStim=1, maxStim=1, minResp=0, maxResp=1, tmax=None, \\\r\n stop_after_response=False, stop_after_offset=True):\r\n import warnings \r\n warnings.simplefilter('default')\r\n warnings.warn('getTrialsLists_fromContinuous will be deleted in future version, use events_to_lists instead.', DeprecationWarning)\r\n import numpy as np\r\n\r\n if otherCodes == []:\r\n otherCodes = [c for c in np.unique(Evts.code) if c not in utilsfunc.in_list(stimCodes) + utilsfunc.in_list(respCodes) + utilsfunc.in_list(onsetCodes) + utilsfunc.in_list(offsetCodes)]\r\n\r\n startIdx = Evts.findEvents(code=T0Codes)\r\n trialIdx = np.hstack((startIdx, Evts.nbEvents()))\r\n\r\n nrTrials = len(startIdx)\r\n trial_list = [None] * nrTrials\r\n stim_list = [None] * nrTrials\r\n resp_list = [None] * nrTrials\r\n onset_list = [None] * nrTrials\r\n offset_list = [None] * nrTrials\r\n otherEvt_list = [None] * nrTrials\r\n\r\n for idx in range(len(startIdx)):\r\n\r\n code = Evts.code[trialIdx[idx]:trialIdx[idx+1]]\r\n chan = Evts.chan[trialIdx[idx]:trialIdx[idx+1]]\r\n sample = Evts.lat.sample[trialIdx[idx]:trialIdx[idx+1]]\r\n time = Evts.lat.time[trialIdx[idx]:trialIdx[idx+1]]\r\n\r\n endIdx = idx_end_event(code, chan, time, stimCodes, respCodes,\\\r\n onset_codes=onsetCodes, offset_codes=offsetCodes,\\\r\n min_stim=minStim, max_stim=maxStim, min_resp=minResp, max_resp=maxResp,\\\r\n tmax=tmax,\\\r\n stop_after_response=stop_after_response, stop_after_offset=stop_after_offset)\r\n\r\n EvtTrial = evt.Events(sample=sample[:endIdx+1], code=code[:endIdx+1], chan=chan[:endIdx+1], sf=Evts.sf)\r\n stimEvt, respEvt, onsetEvt, offsetEvt, otherEvt = read_trial_events(EvtTrial, stimCodes, respCodes, \\\r\n onset_codes=onsetCodes, offset_codes=offsetCodes,\\\r\n other_codes=otherCodes)\r\n\r\n trial_list[idx] = idx\r\n stim_list[idx] = stimEvt\r\n resp_list[idx] = respEvt\r\n onset_list[idx] = onsetEvt\r\n offset_list[idx] = offsetEvt\r\n otherEvt_list[idx] = otherEvt\r\n\r\n return trial_list, stim_list, resp_list, onset_list, offset_list, otherEvt_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returning the sync mode
def get_sync_mode(): return sync_mode
[ "def isSync(self):\n return True", "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def sync(self):\n return self.query('C' + self.channel[-1] + ':SYNC?')", "def lock_mode(self):\n return self._lock_mode", "def is_sync(self):\n return self.command == Command.sync", "def sync(self):\n return self._sync", "def isSync(self):\n return False", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def syncing(self):\n result = self._call(\"syncing\")\n if type(result) is not bool:\n return SyncStatus(**result)\n else:\n return result", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def _get_ldp_sync(self):\n return self.__ldp_sync", "def blocking_mode(self):\n return self.__blocking_mode", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def is_sync_enabled(self) -> bool:\n return self.is_enabled", "def get_synchronous_filter_status(self):\n return 'SYNC?'", "def get_wrap_sync_sleep(self):\n logging.debug(\"Getting wrap sync sleep flag: %s\", str(self.wrap_sync_sleep))\n return self.wrap_sync_sleep", "def SessionMode(self):\n if self.force_auto_sync:\n self.get('SessionMode')\n return self._SessionMode", "def current_mode(self):\r\n return self.mode" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking the sync_mode based on the given configuration
def check_sync_mode(): global sync_mode _description = '' _modes = { SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)', SyncMode.SENDER: '(LOCAL ➔ REMOTE)', SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)', SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)', SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)', SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)', SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)', SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)', SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)' } for _mode, _desc in _modes.items(): if getattr(SyncMode, 'is_' + _mode.lower())(): sync_mode = _mode _description = _desc if is_import(): output.message( output.Subject.INFO, f'Import file {output.CliFormat.BLACK}{system.config["import"]}{output.CliFormat.ENDC}', True ) system.config['is_same_client'] = SyncMode.is_same_host() output.message( output.Subject.INFO, f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}', True )
[ "def get_sync_mode():\n return sync_mode", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def check_config_mode(self):\n return False", "def is_sync(self):\n return self.command == Command.sync", "def validate_sync_gateway_mode(mode):\n if mode != \"cc\" and mode != \"di\":\n raise ValueError(\"Sync Gateway mode must be 'cc' (channel cache) or 'di' (distributed index)\")", "async def check_config_mode(self):\r\n logger.info(\"Host {}: Checking configuration mode\".format(self._host))\r\n check_string = type(self)._config_check\r\n self._stdin.write(self._normalize_cmd(\"\\n\"))\r\n output = await self._read_until_prompt()\r\n return check_string in output", "def ModeCheck():\n return GLOBAL.MBCmode.get()", "def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False", "def is_sync_enabled(self) -> bool:\n return self.is_enabled", "def isSync(self):\n return True", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def DualMode(self) -> bool:", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def isSync(self):\n return False", "def check_config_mode(self, check_string=\">config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "async def check_atomicity(self, guild: discord.Guild) -> bool:\n global_atomic = await self.config.atomic()\n server_atomic = await self.config.guild(guild).atomic()\n if server_atomic is None:\n return global_atomic\n else:\n return server_atomic", "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def _check_for_syncs(self):\n query = 'SELECT * FROM bucardo.sync LIMIT 1'\n conn = psycopg2.connect(self.bucardo_conn_pg_format)\n try:\n with conn.cursor() as cur:\n cur.execute(query)\n syncs = cur.fetchone()\n finally:\n conn.close()\n if syncs:\n return True\n else:\n return False", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if given client is remote client
def is_remote(client): if client == Client.ORIGIN: return is_origin_remote() elif client == Client.TARGET: return is_target_remote() elif client == Client.LOCAL: return False else: return False
[ "def is_remote(self):\n pass", "def isClientHost(self):\n return self.serverThread is not None", "def is_local_client(self):\n return self.msg.is_local_client", "def is_remote(self):\n return False", "def allowed_client(self):\n ip = self.client_address[0]\n return ip in self.server.relays", "def is_remote(self):\n return isinstance(self, (ServiceScan, OSScan, Exploit))", "def isRemote(self):\n\n return False", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def has_client(self):\n return self.client is not None", "def is_client_alive(self, client):\n client_conn = self.all_clients[client]\n try:\n\n ping_message = Message(\"server\", client, \"utility\", \"ping\")\n client_conn.send(str.encode(ping_message.pack_to_json_string()))\n\n except Exception as e:\n print(\"Client communication error \" + str(e))\n return False\n return True", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def IsRemote(path):\n\treturn path[0:7] == \"http://\"", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def client_exists(self, client=None):\n if type(client) is Client:\n return client.client_id in [c.client_id for c in self.client_list]\n else:\n return False", "def is_client(self, endpoint):\n\t\twith self._lock:\n\t\t\treturn endpoint in self._clients", "def checkClientMode():\n if 'TOR_PT_CLIENT_TRANSPORTS' in os.environ: return True\n if 'TOR_PT_SERVER_TRANSPORTS' in os.environ: return False\n raise EnvError('neither TOR_PT_{SERVER,CLIENT}_TRANSPORTS set')", "def _is_remote_request(self):\n\n if self.request.get_header('X-OGDS-CID', None):\n return True\n else:\n return False", "def is_local_connection(self):\n return self.s.getpeername()[0] == self.s.getsockname()[0]", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if target is remote client
def is_target_remote(): return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
[ "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self):\n pass", "def is_remote(self):\n return isinstance(self, (ServiceScan, OSScan, Exploit))", "def is_remote(self):\n return False", "def isRemote(self):\n\n return False", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def isClientHost(self):\n return self.serverThread is not None", "def allowed_client(self):\n ip = self.client_address[0]\n return ip in self.server.relays", "def is_local_client(self):\n return self.msg.is_local_client", "def _is_remote_request(self):\n\n if self.request.get_header('X-OGDS-CID', None):\n return True\n else:\n return False", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def checkClientMode():\n if 'TOR_PT_CLIENT_TRANSPORTS' in os.environ: return True\n if 'TOR_PT_SERVER_TRANSPORTS' in os.environ: return False\n raise EnvError('neither TOR_PT_{SERVER,CLIENT}_TRANSPORTS set')", "def IsRemote(path):\n\treturn path[0:7] == \"http://\"", "def isServer(self):", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def _is_task_on_responsible_client(self):\n return get_client_id() == self.context.responsible_client", "def host_is_target(target):\n return host_arch_target() == target_arch(target)", "def has_client(self):\n return self.client is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if origin is remote client
def is_origin_remote(): return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
[ "def is_remote(self):\n pass", "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self):\n return False", "def isRemote(self):\n\n return False", "def _is_remote_request(self):\n\n if self.request.get_header('X-OGDS-CID', None):\n return True\n else:\n return False", "def is_remote(self):\n return isinstance(self, (ServiceScan, OSScan, Exploit))", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def IsRemote(path):\n\treturn path[0:7] == \"http://\"", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_local_client(self):\n return self.msg.is_local_client", "def isClientHost(self):\n return self.serverThread is not None", "def is_local_connection(self):\n return self.s.getpeername()[0] == self.s.getsockname()[0]", "def allowed_client(self):\n ip = self.client_address[0]\n return ip in self.server.relays", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def isServer(self):", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def checkClientMode():\n if 'TOR_PT_CLIENT_TRANSPORTS' in os.environ: return True\n if 'TOR_PT_SERVER_TRANSPORTS' in os.environ: return False\n raise EnvError('neither TOR_PT_{SERVER,CLIENT}_TRANSPORTS set')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if sync mode is import
def is_import(): return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)
[ "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def is_import(self):\n return self.sh_info is None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK' or \\\n self.binding == 'STT_FUNC')", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def is_sync(self):\n return self.command == Command.sync", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def is_import_completion(self):\n current_line = self.get_current_line()\n\n # Seperate cases! More difficult than I thought\n match = re.match(r\"(import)|(from)\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before == \"from\" or word_before == \"import\":\n # Need to check for multiple imports! (TODO)\n return True\n\n return False", "def is_import_from_completion(self):\n\n current_line = self.get_current_line()\n\n match = re.match(r\"from .* import\", current_line)\n if match and self.get_word() != \"import\":\n return True\n\n return False", "def isSync(self):\n return True", "def isSync(self):\n return False", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def is_sync_enabled(self) -> bool:\n return self.is_enabled", "def is_sync_instance(self):\n return True", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def is_sync_available():\n\n from t_system import network_connector\n\n return network_connector.is_network_online()", "def _check_for_syncs(self):\n query = 'SELECT * FROM bucardo.sync LIMIT 1'\n conn = psycopg2.connect(self.bucardo_conn_pg_format)\n try:\n with conn.cursor() as cur:\n cur.execute(query)\n syncs = cur.fetchone()\n finally:\n conn.close()\n if syncs:\n return True\n else:\n return False", "def set_import_mode(self, flag):\n\t\tif self.buttonImport.get_active() and not flag:\n\t\t\tself.buttonImport.set_active(False)\n\t\t\treturn True\n\t\telif not self.buttonImport.get_active() and flag:\n\t\t\tself.buttonImport.set_active(True)\n\t\t\treturn True\n\t\treturn False", "def default_use_for_import():\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert valid court order.
def test_court_orders(session, test_status, expected_code, expected_msg): business = factory_business('BC1234567') filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE) del filing['filing']['courtOrder']['fileKey'] if test_status == 'FAIL': del filing['filing']['courtOrder']['orderDetails'] filing['filing']['courtOrder']['effectOfOrder'] = 'invalid' err = validate(business, filing) if expected_code: assert err.code == expected_code assert lists_are_equal(err.msg, expected_msg) else: assert err is None
[ "def test_validate_invalid_court_orders(invalid_court_order):\n registration_json = copy.deepcopy(REGISTRATION)\n registration_json['courtOrder'] = invalid_court_order\n legal_filing = {'registration': registration_json}\n\n is_valid, errors = validate(legal_filing, 'registration')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def verify_cash(self, position):\n self.assertEqual(len(position), 8)\n self.assertEqual(position['Local CCY'], 'USD')\n self.assertAlmostEqual(position['Position Accounting Market Value (Local CCY)'], 9404448.39)\n self.assertAlmostEqual(position['Accrued Interest'], 0)\n self.assertAlmostEqual(position['Exchange Rate'], 0.151145)\n self.assertAlmostEqual(position['Accounting Market Value (VCY)'], 62221366.17)\n self.assertEqual(position['As Of'], datetime.datetime(2017,10,17))", "def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])", "def test_course_listing_has_pre_requisite_courses(self):\n course_location2 = self.store.make_course_key('Org1', 'Course2', 'Run2')\n self._create_course_with_access_groups(course_location2)\n pre_requisite_course_location = self.store.make_course_key('Org1', 'Course3', 'Run3')\n self._create_course_with_access_groups(pre_requisite_course_location)\n pre_requisite_course_location2 = self.store.make_course_key('Org1', 'Course4', 'Run4')\n self._create_course_with_access_groups(pre_requisite_course_location2)\n # create a course with pre_requisite_courses\n pre_requisite_courses = [\n str(pre_requisite_course_location),\n str(pre_requisite_course_location2),\n ]\n course_location = self.store.make_course_key('Org1', 'Course1', 'Run1')\n self._create_course_with_access_groups(course_location, {\n 'pre_requisite_courses': pre_requisite_courses\n })\n\n set_prerequisite_courses(course_location, pre_requisite_courses)\n # get dashboard\n course_enrollments = list(get_course_enrollments(self.student, None, []))\n courses_having_prerequisites = frozenset(\n enrollment.course_id for enrollment in course_enrollments\n if enrollment.course_overview.pre_requisite_courses\n )\n courses_requirements_not_met = get_pre_requisite_courses_not_completed(\n self.student,\n courses_having_prerequisites\n )\n assert len(courses_requirements_not_met[course_location]['courses']) == len(pre_requisite_courses)", "def _AssertValid(self):\r\n # CalculateContactId will assert several things, too.\r\n if self.IsRemoved():\r\n # A removed contact is not expected to have a contact_id calculated from it's details\r\n # because removed contacts are stored without contact details.\r\n assert self.contact_source is not None and self.contact_source in Contact.ALL_SOURCES, self\r\n else:\r\n contact_id = Contact.CalculateContactId(self._asdict())\r\n assert contact_id == self.contact_id, self\r\n assert self.timestamp is not None, self\r\n sort_key = Contact.CreateSortKey(self.contact_id, self.timestamp)\r\n assert sort_key == self.sort_key, self", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_validate_bad_orders(self):\n exc_type = ValidationException\n invalid_order = copy.deepcopy(self.base_order)\n c = 0 # For initial debugging\n\n for proj in testorders.good_test_projections:\n invalid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n invalid_list = testorders.InvalidOrders(invalid_order, self.base_schema, abbreviated=True)\n\n for order, test, exc in invalid_list:\n # issues getting assertRaisesRegExp to work correctly\n with self.assertRaises(exc_type):\n try:\n c += 1\n api.validation(order, self.staffuser.username)\n except exc_type as e:\n if str(exc) in str(e):\n raise\n else:\n self.fail('\\n\\nExpected in exception message:\\n{}'\n '\\n\\nException message raised:\\n{}'\n '\\n\\nUsing test {}'.format(str(exc), str(e), test))\n else:\n self.fail('\\n{} Exception was not raised\\n'\n '\\nExpected exception message:\\n{}\\n'\n '\\nUsing test: {}'.format(exc_type, str(exc), test))\n #print c # For initial debugging", "def test_ordering(self):\n tc1 = self.F.CaseFactory.create(product=self.p)\n tcv1 = self.F.CaseVersionFactory.create(\n case=tc1, productversion=self.pv8, status=\"active\")\n tc2 = self.F.CaseFactory.create(product=self.p)\n tcv2 = self.F.CaseVersionFactory.create(\n case=tc2, productversion=self.pv8, status=\"active\")\n tc3 = self.F.CaseFactory.create(product=self.p)\n tcv3 = self.F.CaseVersionFactory.create(\n case=tc3, productversion=self.pv8, status=\"active\")\n\n ts1 = self.F.SuiteFactory.create(product=self.p, status=\"active\")\n self.F.SuiteCaseFactory.create(suite=ts1, case=tc3, order=1)\n ts2 = self.F.SuiteFactory.create(product=self.p, status=\"active\")\n self.F.SuiteCaseFactory.create(suite=ts2, case=tc2, order=1)\n self.F.SuiteCaseFactory.create(suite=ts2, case=tc1, order=2)\n\n r = self.F.RunFactory.create(productversion=self.pv8)\n self.F.RunSuiteFactory.create(suite=ts2, run=r, order=1)\n self.F.RunSuiteFactory.create(suite=ts1, run=r, order=2)\n\n r.activate()\n\n self.assertOrderedCaseVersions(r, [tcv2, tcv1, tcv3])", "def _validate_content_order(content, expected):\n pattern = '.*'.join(expected)\n flags = re.MULTILINE | re.DOTALL\n assert re.search(pattern, content, flags)", "def test_02_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_01_lunch_order()\r\n #We have a confirmed order with its associate cashmove\r\n #We execute the cancel function\r\n self.order_one.cancel()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #We check that the state is cancelled and that the cashmove has been deleted\r\n self.assertEqual(self.order_one.state,'cancelled')\r\n self.assertFalse(self.order_one.cashmove)", "def test_required_properties_order() -> None:\n soup = generate_case(\"required_properties_order\")\n\n tests.html_schema_doc_asserts.assert_undocumented_required(soup, [\"a\", \"b\", \"b\", \"a\"])", "def _assert_prereq(self, html, sequence):\n assert 'seq_module.html' in html\n html = self.get_context_dict_from_string(html)\n assert 'This section is a prerequisite. You must complete this section in order to unlock additional content.' == html['banner_text']\n assert not html['gated_content']['gated']\n assert str(sequence.location) == html['item_id']\n assert html['gated_content']['prereq_url'] is None\n assert html['gated_content']['prereq_section_name'] is None\n assert 'NextSequential' == html['next_url']\n assert 'PrevSequential' == html['prev_url']", "def test_headlines_order(self) -> None:\n last: Tuple[int, str] = (0, \"\")\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if (not rule) or (rule.order is None):\n continue\n\n last_order, last_headline = last # type: int, str\n if last_order > rule.order:\n self.add_error(\n (\n f\"Rubriken {headline.name} ska komma före \"\n f\"rubriken {last_headline}.\"\n ),\n headline=headline,\n )\n\n last = (rule.order, headline.name)", "def test_create_course(self):\r\n self.assert_created_course()", "def test_american_exercise_barrier(self):\n S0 = 100\n n = 100\n T = 5\n strike = 100\n sigma = 0.4\n r = 0.1\n mdl = create_binomial_model(sigma, r, S0, T, n)\n exercise_barrier = american_put_exercise_barrier(mdl, strike)\n last_even = np.nan\n last_odd = np.nan\n for i, s in enumerate(exercise_barrier):\n if i % 2 == 0:\n if not np.isnan(last_even):\n self.assertGreaterEqual(s, last_even)\n last_even = s\n elif i % 2 == 1:\n if not np.isnan(last_odd):\n self.assertGreaterEqual(s, last_odd)\n last_odd = s", "def test_order_list(self):\n pass", "def test_ordering(self):\r\n tc1 = self.F.CaseFactory.create(product=self.p)\r\n tcv1 = self.F.CaseVersionFactory.create(\r\n case=tc1, productversion=self.pv8, status=\"active\")\r\n tc2 = self.F.CaseFactory.create(product=self.p)\r\n tcv2 = self.F.CaseVersionFactory.create(\r\n case=tc2, productversion=self.pv8, status=\"active\")\r\n tc3 = self.F.CaseFactory.create(product=self.p)\r\n tcv3 = self.F.CaseVersionFactory.create(\r\n case=tc3, productversion=self.pv8, status=\"active\")\r\n tc4 = self.F.CaseFactory.create(product=self.p)\r\n tcv4 = self.F.CaseVersionFactory.create(\r\n case=tc4, productversion=self.pv8, status=\"active\")\r\n\r\n ts1 = self.F.SuiteFactory.create(product=self.p, status=\"active\")\r\n self.F.SuiteCaseFactory.create(suite=ts1, case=tc3, order=1)\r\n self.F.SuiteCaseFactory.create(suite=ts1, case=tc4, order=2)\r\n ts2 = self.F.SuiteFactory.create(product=self.p, status=\"active\")\r\n self.F.SuiteCaseFactory.create(suite=ts2, case=tc1, order=1)\r\n self.F.SuiteCaseFactory.create(suite=ts2, case=tc2, order=2)\r\n\r\n r = self.F.RunFactory.create(productversion=self.pv8)\r\n self.F.RunSuiteFactory.create(suite=ts2, run=r, order=1)\r\n self.F.RunSuiteFactory.create(suite=ts1, run=r, order=2)\r\n\r\n r.activate()\r\n\r\n self.assertOrderedCaseVersions(r, [tcv1, tcv2, tcv3, tcv4])", "def testSortOrder(self):\r\n timestamp = time.time()\r\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\r\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\r\n self.assertGreater(comment_id2, comment_id1)", "def test_cancel_order(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a text label for an axis describing a provided CSV column.
def get_label(column): for key, label in column_to_label.items(): if key in column: return label
[ "def _label(self, column):\n # XXX\n return column", "def getAxisLabel(self, dim=0):\n return self.__axis_labels__[dim]", "def label(self, row: Dict[str, str]) -> str:\n\n return row['Annotation']", "def getColLabel(self, i=':'):\n if i == ':':\n return self.colLabels\n return self.colLabels[i]", "def get_target(csv, text = False):\n y_mapping = {'BL1':0, 'PA1':1, 'PA2':2, 'PA3':3,'PA4':4}\n \n idx = max( csv.find('PA'), csv.find('BL'))\n label = csv[idx:idx+3]\n if text:\n return label\n return y_mapping[label]", "def _axis_label_from_meta(\n pdata: dict,\n **kwargs\n) -> str:\n return f\"{pdata['system']} {pdata['name']} [{pdata['var_unit']}]\"", "def label(x):\n if isinstance(x,xbrl.taxonomy.Concept):\n concept = x\n else:\n concept = x.concept\n labels = list(concept.labels(lang='en',label_role=xbrl.taxonomy.ROLE_LABEL))\n return labels[0].text if labels else prefixed_name(x)", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def getLabel(self):\n return self.axisItem.labelText", "def get_labels(df, label):\n return df[label]", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def get_axis_labels(chart): \n axes = chart['chxt'].split(',')\n axis_labels = [[] for _ in axes]\n axis_index = 0\n for label in chart['chxl'].split('|'):\n if label.endswith(':'):\n axis_index = int(label[:1])\n assert 0 <= axis_index < len(axes), axis_index\n else:\n axis_labels[axis_index].append(label)\n return dict((axis, axis_labels[i]) for i, axis in enumerate(axes))", "def _curve_labels(self, x_axis, sample, ylabel):\n return str(sample), x_axis.capitalize(), sample", "def label(field):\n if hasattr(field,'long_name'):\n return field.long_name\n elif hasattr(field,'units'):\n return \"%s (%s)\"%(field.nxname,field.units)\n else:\n return field.nxname", "def get_axis_label(nom, denom=None):\n label = AXISNAME[nom]\n if denom:\n label = r\"$\\Delta$\" + label + r\"/$\\Delta$\" + AXISNAME[denom]\n return label", "def get_label(self, table_idx):\n table = self.__tables[table_idx]\n for column in table.findall('column'):\n if column.get('name') in self.__companies and column.text != 'NULL':\n try:\n return int(column.text)\n except ValueError:\n return 0", "def _get_labels(data, axis=0, always=True):\n # NOTE: Previously inferred 'axis 1' metadata of 1D variable using the\n # data values metadata but that is incorrect. The paradigm for 1D plots\n # is we have row coordinates representing x, data values representing y,\n # and column coordinates representing individual series.\n if axis not in (0, 1, 2):\n raise ValueError(f'Invalid axis {axis}.')\n labels = None\n _load_objects()\n if isinstance(data, ndarray):\n if not always:\n pass\n elif axis < data.ndim:\n labels = np.arange(data.shape[axis])\n else: # requesting 'axis 1' on a 1D array\n labels = np.array([0])\n # Xarray object\n # NOTE: Even if coords not present .coords[dim] auto-generates indices\n elif isinstance(data, DataArray):\n if axis < data.ndim:\n labels = data.coords[data.dims[axis]]\n elif not always:\n pass\n else:\n labels = np.array([0])\n # Pandas object\n elif isinstance(data, (DataFrame, Series, Index)):\n if axis == 0 and isinstance(data, (DataFrame, Series)):\n labels = data.index\n elif axis == 1 and isinstance(data, (DataFrame,)):\n labels = data.columns\n elif not always:\n pass\n else: # beyond dimensionality\n labels = np.array([0])\n # Everything else\n # NOTE: We ensure data is at least 1D in _to_arraylike so this covers everything\n else:\n raise ValueError(f'Unrecognized array type {type(data)}.')\n return labels", "def get_column_labels(): \n column_labels = ['Player Name', 'Match Index', 'Result', 'Match Duration', 'Player Hero', 'Player Team']\n for i in range(5):\n column_labels.append('Radiant Hero ' + str(i+1))\n for i in range(5):\n column_labels.append('Dire Hero ' + str(i+1))\n for i in range(5):\n column_labels.append('Player Item ' + str(i+1))\n for i in range(25):\n column_labels.append('Skill Level ' + str(i+1))\n return column_labels", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all possible values of a column in the pandas.DataFram list
def dfs_all_values(dfs, column): values = [] # loop over all (pandas.DataFrame, str) pairs for df in dfs: values.extend(df[column].tolist()) # set() removes duplicates # sorted() converts Set to List and sort the elements return sorted(set(values))
[ "def get_possible_matches(df, **columns):\n\n for col_name, col_value in columns.items():\n df = df.loc[df[col_name] == col_value]\n\n return df", "def get_values(self, col) :\n\n if col not in self.cols :\n raise Exception('Column %s not in data' % col)\n\n select_sql = 'SELECT \"%s\" FROM \"%s\" ORDER BY __ROWID ASC' % (col, self.name)\n cur = self.con.cursor()\n cur.execute(select_sql)\n vs = cur.fetchall()\n return [v[0] for v in vs]", "def create_value_set(self, col):\n\n value_set = set()\n\n for df in self:\n value_set.update(df[col])\n return value_set", "def find_possible_values(grid: List[List[str]], pos: Tuple[int, int]) -> Set[str]:\n used_values = set()\n row, col = pos\n field = get_block(grid, (row, col))\n for line in field:\n for value in line:\n used_values.update(value)\n used_values.update(get_row(grid, (row, col)))\n used_values.update(get_col(grid, (row, col)))\n return [value for value in '123456789' if value not in used_values]", "def flat_set_from_df(df, col, condition=None):\n if condition is not None:\n df = df[condition]\n lists = df[col].tolist()\n return set([item for sublist in lists for item in sublist])", "def returnAllColumnValues(self,fieldname):\n values= []\n for i in range(len(self.rows)):\n values.append(self.returnTableValue(fieldname,i))\n return values", "def available_choices(filtered_games, colname):\n assert isinstance(filtered_games, pd.DataFrame)\n assert isinstance(colname, str) and colname in filtered_games.columns\n\n return(set(it.chain.from_iterable(filtered_games[colname])))", "def get_features(df: pd.DataFrame) -> List[str]:\n return list(set([col.split('_')[1] for col in df.columns]))", "def column_select(df,returnList = [\"x\",\"y\"]):\n df = df.sort_values(by = 'frame_id')\n return [ list(df[k]) for k in returnList]", "def create(df,column,list_):\n return df[df[column].isin(list_)]", "def get_columns_in_expression(exp: Expression) -> Set[Column]:\n return set(e for e in exp if isinstance(e, Column))", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def get_multiple_matches(df, column, to_match_array):\n return df[df[column].isin(to_match_array)]", "def unique_values(df):\n cols = list(df.columns)\n\n for col in cols:\n uniques = (df[col]).unique()\n print(f\"{len(uniques)} unique items in {col}: {df[col].loc[0]},{df[col].loc[1]}, {df[col].loc[2]}...\")", "def lookup_allowed_values(db, table, column):\n query = QSqlQuery(db)\n query.prepare('SELECT allowed_value FROM allowed_values WHERE `table_name` = :table and `column_name` = :column')\n query.bindValue(':table', table)\n query.bindValue(':column', column)\n if not query.exec():\n raise SyntaxError(query.lastError().text())\n\n values = []\n while query.next():\n values.append(query.value('allowed_value'))\n\n return values", "def _load_data_values(self, dataframe):\n if dataframe.empty:\n return ['']\n return dataframe.values.tolist()", "def list_unique(df):\n\n # print unique values of each column\n for col in df.columns:\n print(f\"{col}:\")\n print(f\"{list(df[col].unique())}\\n\")", "def get_possible_sets(self):\n return self.sudoku.iter_all()", "def unique (a_data,a_column) :\n return list(__np.unique(a_data[a_column]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw multiple lines y(x) using data from the dfs list on the ax subplot.
def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max): xticks = dfs_all_values(dfs, x) # loop over all pandas.DataFrame objects for df in dfs: # setting the x-column as an index is required to draw the y-column # as a function of x argument df = df.set_index(x) # plot line on the subplot df[y].plot.line(ax=ax, rot=45, marker='.') if xscale == "linear": ax.set_xscale(xscale) else: ax.set_xscale(xscale, base=2) ax.xaxis.set_major_formatter(ScalarFormatter()) ax.set_xticks(xticks) ax.set_xlabel(get_label(x)) ax.set_ylabel(get_label(y)) ax.set_ylim(bottom=0) if yaxis_max is not None: ax.set_ylim(top=float(yaxis_max)) ax.legend(legend, fontsize=6) ax.grid(True)
[ "def plot(x, y, *dfs):\n ax = None\n for df in dfs:\n ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax)", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def replot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n line.set_xdata(self.data[i].x)\n for line in self.lines: \n ax.draw_artist(line)", "def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot_lines(self):\n self.plot(3)", "def make_stacked_lineplot(dfs, ys, fname, hue, x=\"year\", ylabels=None,\n figsize=(10, 12), ncol=4):\n # Plot\n fig, axes = plt.subplots(len(dfs), 1, figsize=figsize, sharex=True)\n for idx, (df, y) in enumerate(zip(dfs, ys)):\n df = df.sort_values(hue)\n sns.lineplot(x=x, y=y, hue=hue, data=df, ax=axes[idx],\n style=None, ci=None)\n # Legend\n handles, labels = axes[0].get_legend_handles_labels()\n axes[0].legend(handles=handles[0:], labels=labels[0:], ncol=ncol, loc=\"best\")\n for ax in axes[1:]:\n ax.get_legend().remove()\n # Aesthetics\n for i, ax in enumerate(axes):\n ax.set(xlabel=\"\", ylabel=ylabels[i])\n format_time_axis(ax, df[x].min(), df[x].max())\n ax.set_ylim(bottom=0, top=100)\n add_figure_letter(ax, i)\n # Save\n fig.savefig(fname, bbox_inches=\"tight\")\n plt.close(fig)", "def plot(self, *args, **kwargs):\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)", "def plot(self, days_per_line=1, **kwargs):\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n \n # Set relevant variables for the plot\n time_min = Time(self.starts[0].isot.split('T')[0])\n time_max = self.stops[-1]\n\n # Find the number of sub-plots to display the schedule\n if not isinstance(days_per_line, TimeDelta):\n days_per_line = TimeDelta(\n days_per_line,\n format='jd'\n )\n n_subplots = int(np.ceil((time_max - time_min)/days_per_line))\n\n # Initialize the figure\n fig, axs = plt.subplots(\n nrows=n_subplots,\n ncols=1,\n figsize=kwargs.get('figsize', (15, 3*n_subplots))\n )\n\n # Iterate over the sub-plots\n try:\n isIterable = iter(axs)\n except:\n axs = [axs]\n for i, ax in enumerate(axs):\n # Plot time limits\n tiMin = time_min + i*days_per_line\n tiMax = time_min + (i + 1)*days_per_line\n ax.set_xlim(\n left=tiMin.datetime,\n right=tiMax.datetime\n )\n\n # Display granularity\n if kwargs.get('grid', True):\n tMask = (self._startsJD >= tiMin.jd) *\\\n (self._stopsJD <= tiMax.jd)\n for slot_start in self.starts[tMask]:\n ax.axvline(\n slot_start.datetime,\n color='gray',\n linestyle='-',\n linewidth=0.5\n )\n\n # Display unavailable slots\n if self.reserved_blocks is not None:\n for bookedBlock in self.reserved_blocks:\n bookedBlock._display(ax=ax)\n\n # Display observation blocks\n if self.observation_blocks is not None:\n for obsBlock in self.observation_blocks:\n if not obsBlock.isBooked:\n continue\n obsBlock._display(ax=ax)\n\n # Formating\n ax.yaxis.set_visible(False)\n h_fmt = mdates.DateFormatter('%y-%m-%d\\n%H')\n ax.xaxis.set_major_formatter(h_fmt)\n\n # Save or show the figure\n figname = kwargs.get('figname', '')\n if figname != '':\n plt.savefig(\n figname,\n dpi=300,\n bbox_inches='tight',\n transparent=True\n )\n log.info(f\"Figure '{figname}' saved.\")\n else:\n plt.show()\n plt.close('all')", "def _data_plot(isotherm, current_branch, y1_style, y2_style, **iso_params):\n\n # Plot line 1\n y1_lbl = label_lgd(isotherm, lgd_keys, current_branch, branch, y1_data)\n\n if x_points is not None:\n x_p = x_points\n y1_p = _get_data(\n isotherm, y1_data, current_branch, y1_range, x_points,\n **iso_params\n )\n elif y1_points is not None:\n x_p = _get_data(\n isotherm, x_data, current_branch, x_range, y1_points,\n **iso_params\n )\n y1_p = y1_points\n else:\n x_p = _get_data(\n isotherm, x_data, current_branch, x_range, **iso_params\n )\n y1_p = _get_data(\n isotherm, y1_data, current_branch, y1_range, **iso_params\n )\n x_p, y1_p = x_p.align(y1_p, join='inner')\n\n ax1.plot(x_p, y1_p, label=y1_lbl, **y1_style)\n\n # Plot line 2 (if applicable)\n if y2_data and y2_data in keys(isotherm):\n\n y2_p = _get_data(\n isotherm, y2_data, current_branch, y2_range, **iso_params\n )\n aligned = x_p.align(y2_p, join='inner')\n\n y2_lbl = label_lgd(\n isotherm, lgd_keys, current_branch, branch, y2_data\n )\n ax2.plot(aligned[0], aligned[1], label=y2_lbl, **y2_style)", "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )", "def replot(self,ax):\n self.XP_Plotter.replot(ax)\n # theoretical lines\n self.lines_theory[0].set_xdata(self.xx)\n self.lines_theory[1].set_xdata(self.xx)\n self.lines_theory[2].set_xdata(self.xx_itpl)\n for line in self.lines_theory: \n ax.draw_artist(line)", "def plot_lines(x, lines, train=False):\n plt.figure()\n if train:\n plt.plot(x_train, y_train, 'rx')\n for l in lines:\n plt.plot(x, l, '.-')", "def plotData(self, data):\n x_array = []\n y_array = []\n for i in data:\n x_array.append(i[0])\n y_array.append(i[1])\n self.fig.clear()\n self.axes = self.fig.add_subplot(111)\n self.line, = self.axes.plot(x_array, y_array, label=os.path.split(self.inputFile)[-1])\n handles, labels = self.axes.get_legend_handles_labels()\n self.fig.legend(handles,labels)\n self.axes.get_xaxis().get_major_formatter().set_useOffset(False)\n self.canvas.draw()", "def plot_disc_walkers(self, id_discs=None):\n # Making sure we have a list\n if not id_discs:\n id_discs = range(len(self.axes))\n elif type(id_discs) == int:\n id_discs = [id_discs]\n \n nplots = len(id_discs)\n fig, axes = plt.subplots(nplots, 3, sharex=True, figsize=(20, nplots*5))\n shape = axes.shape\n if len(shape) > 1:\n for axg in axes:\n for ax in axg:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n else:\n for ax in axes:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) \n \n \n for disc_id in id_discs:\n axis_name = {\"x\": \"yz\", \"y\": \"xz\", \"z\": \"xy\"}[self.axes[disc_id]]\n param_name = ['a', 'b', 'M']\n for i in range(3):\n pid = disc_id*3+i\n samples = sampler.chain[:,:,pid].T\n if nplots > 1:\n axis = axes[disc_id][i]\n else:\n axis = axes[i]\n \n axis.plot(samples, color='k', alpha=10.0 / self.n_walkers)\n #axis.yaxis.set_major_locator(MaxNLocator(5))\n axis.set_ylabel('$'+param_name[i]+'_{{{0}{1}}}$'.format(axis_name, disc_id))\n axis.set_xlabel('Iteration')\n\n #plt.title('Parameter values for discs : ' + ', '.join(str(x) for x in id_discs))\n\n return fig", "def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()", "def plot_epochs(epochs, y, line):\n ep = np.arange(0, epochs)\n if hasattr(y[0], '__len__'):\n for i in range(len(y[0])):\n plt.plot(ep, [val[i] for val in y], line[i])\n else:\n plt.plot(ep, y, line)\n plt.show()", "def lineplots_on_grid(df,\n x_data: Union[str, List[str]],\n y_data: Union[str, List[str]],\n nrows: int,\n ncols: int,\n save_figure: bool=False,\n subplot_kwargs: dict=None,\n plot_kwargs: dict=None,\n save_kwargs: dict=None):\n # Prepare the configuration dicts\n subplot_kwargs = subplot_kwargs if subplot_kwargs else {}\n plot_kwargs = plot_kwargs if plot_kwargs else {}\n save_kwargs = save_kwargs if save_kwargs else {}\n\n if isinstance(x_data, str) and y_data=='_all':\n y_data = list(df.columns)\n\n # Prepare labels\n if isinstance(y_data, str):\n y_data = [y_data]\n if isinstance(x_data, str):\n x_data = [x_data] * len(y_data)\n elif len(x_data) != len(y_data):\n raise TypeError(\"'x_data' and 'y_data' must be of the same size. \"\n \"Received: {} (x-data), {} (y-data)\"\n \"\".format(len(x_data), len(y_data)))\n\n fig, axes = plt.subplots(nrows, ncols, squeeze=False, **subplot_kwargs)\n axes = axes.ravel()\n\n # Plot each (x, y) pair in a new grid cell\n for i, (x, y) in enumerate(zip(x_data, y_data)):\n\n lineplot(df, x_data=x, y_data=y, ax=axes[i],\n plot_kwargs=plot_kwargs)\n\n # Set labels\n axes[i].set_xlabel(x.replace('_', ' '))\n axes[i].set_ylabel(y.replace('_', ' '))\n\n if save_figure:\n fig.savefig(**save_kwargs)\n\n return fig, axes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a table of all data used to chart y(x)
def draw_table(ax, dfs, legend, x, y): col_labels = dfs_all_values(dfs, x) column_legend = [] cell_text = [] # loop over all pandas.DataFrame objects for df in dfs: # to allow query y(x) easily df = df.set_index(x) df_row = df[y] # build a row with filled blanks '-' row = ["{:.2f}".format(df_row[column]) if column in df_row.index else '-' \ for column in col_labels] cell_text.append(row) ax.axis('tight') ax.axis('off') ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \ loc='top')
[ "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def plot_data(self):", "def sleep_table_plot(data, date):\n nrow, ncol = data.shape\n nrow += 1\n fig = plt.figure(figsize=(1, nrow*0.3))\n ax = fig.add_subplot(111)\n ax.axis('off')\n tbl = ax.table(cellText=data.to_numpy(),\n colLabels=data.columns,\n rowLabels=data.index,\n colWidths=[2.5]*3,\n cellLoc='center',\n loc='center')\n tbl.auto_set_font_size(False)\n tbl.set_fontsize(16)\n\n cell_dict = tbl.get_celld()\n for i, j in itertools.product(list(range(nrow)), [-1]+list(range(ncol))):\n if (i, j) != (0, -1):\n cell = cell_dict[(i, j)]\n text = cell_dict[(i, j)].get_text()\n cell.set_linewidth(1)\n cell.set_edgecolor('gray')\n #text.set_family('DINPro')\n text.set_weight('medium')\n if i == 0:\n cell.set_facecolor('gray')\n cell.set_height(0.15)\n text.set_color('white')\n text.set_family('Verdana')\n text.set_weight('bold')\n text.set_fontsize(12)\n if j == -1:\n cell.set_facecolor('green')\n text.set_color('white')\n text.set_family('Verdana')\n text.set_weight('bold')\n text.set_fontsize(12)\n tbl.scale(1, 2.5)\n ax.margins(0, 0)\n fig.savefig('img/sleep_table_'+date, bbox_inches='tight', pad_inches=0.1, dpi=200)", "def ax_table(ax, df):\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_frame_on(False)\n\n norm = plt.Normalize(3, 6)\n colours = plt.cm.hot(norm(df.values))\n the_table = ax.table(cellText=df.values.astype(int),\n rowLabels=df.index, colLabels=df.columns,\n colWidths = [0.1]*vals.shape[1],\n loc='center', fontsize = 16,\n cellColours=colours)\n\n return the_table", "def plot_table(self):\n key = ''\n if self.sender().objectName()==\"plot_mfdot_pushButton\":\n key = 'mfdot_array'\n current_dict = self.current_dict['injection']\n xlabel = \"Angle [deg]\"\n ylabel = \"Value [kg/s]\"\n title = \"Angle vs. Value\"\n elif self.sender().objectName()==\"plot_xbdot_pushButton\":\n key = 'xbdot_array'\n current_dict = self.current_dict['combustion']\n xlabel = \"Angle [deg]\"\n ylabel = \"Value [1/s]\"\n title = \"Angle vs. Value\"\n assert(key!='')\n\n if key not in current_dict or current_dict[key] == []:\n show_message(\"Not valid data\")\n return\n pg.setConfigOptions(background=None)\n pg.setConfigOptions(foreground='k')\n try:\n xdata = [item[0] for item in current_dict[key]]\n ydata = [item[1] for item in current_dict[key]]\n plot = pg.PlotWidget()\n set_plot(plot, xlabel, ylabel, \"\")\n dialog = QtWidgets.QDialog()\n dialog.setLayout(QtWidgets.QHBoxLayout())\n dialog.layout().addWidget(plot)\n dialog.setWindowTitle(title)\n plot.plot(xdata, ydata, pen={'color': 'r', 'width': 1})\n dialog.exec_()\n except:\n show_message(\"Cannot plot the current values\")\n return", "def make_plot_table (self, xs, ys = None, names = None, sigfig=0,\n community = None, fname = None):\n if type(xs) == DataFrame and len(xs.columns) > 1:\n x_name = xs.columns[0]\n y_name = list(xs.columns[1:])\n else:\n # todo:fix order\n x_name = xs.keys()[0]\n y_name = ys.keys()\n xs.update(ys)\n xs = DataFrame(xs)[[x_name] + y_name]\n\n if names is None:\n names = [x_name] + y_name\n\n header = []\n years = None\n anno = None\n for name in names:\n if name == 'annotation':\n header.append(\"{type: 'string', role: 'annotation'}\")\n anno = name\n else:\n header.append(\n \"{label: '\" +name[0].upper() + name[1:].lower()+ \\\n \"', type: 'number'}\"\n )\n\n if not anno is None:\n xs[anno+'_text'] = xs[anno]\n header.append(\"{type: 'string', role: 'annotationText'}\")\n\n for name in xs.columns:\n if name.lower() == 'year':\n years = name\n xs[name] = xs[name].astype(int)\n\n if not years is None:\n xs = xs[xs[years] <= self.max_year]\n\n plotting_table = xs.round(sigfig).values.tolist()\n\n\n if not community is None and not fname is None:\n cols = [c for c in \\\n xs.columns if c.lower().find('annotation_text') == -1]\n xs[cols].round(sigfig).to_csv(\n os.path.join(\n self.directory, community.replace(\"'\",\"\"),'csv', fname),\n index=False)\n\n for row in plotting_table:\n for idx in range(len(row)):\n if type(row[idx]) is long:\n row[idx] = int(row[idx])\n plotting_table.insert(0,header)\n #~ print plotting_table\n return plotting_table", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def create_tables(times, accuracies, batch_sizes):\r\n #Get time data\r\n p_cpu_times = list(times[0].values())\r\n p_gpu_times = list(times[1].values())\r\n c_cpu_times = list(times[2].values())\r\n c_gpu_times = list(times[3].values())\r\n\r\n #Get differences in times\r\n p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]\r\n c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]\r\n cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]\r\n gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_times,\r\n p_gpu_times,\r\n p_diff_times,\r\n c_cpu_times,\r\n c_gpu_times,\r\n c_diff_times,\r\n cpu_diff_times,\r\n gpu_diff_times]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')\r\n row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))\r\n col_colors = np.array([192/255,192/255,192/255, 1])\r\n col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)\r\n\r\n #Create table\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_time.png')\r\n\r\n\r\n #Get accuracy table\r\n #Get accuracy data\r\n p_cpu_accuracy = list(accuracies[0].values())\r\n p_gpu_accuracy = list(accuracies[1].values())\r\n c_cpu_accuracy = list(accuracies[2].values())\r\n c_gpu_accuracy = list(accuracies[3].values())\r\n\r\n #Get max of each batch\r\n p_cpu_max = [max(x) for x in p_cpu_accuracy]\r\n p_gpu_max = [max(x) for x in p_gpu_accuracy]\r\n c_cpu_max = [max(x) for x in c_cpu_accuracy]\r\n c_gpu_max = [max(x) for x in c_gpu_accuracy]\r\n\r\n #Get differences in accuracies\r\n p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]\r\n c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]\r\n cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]\r\n gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_max,\r\n p_gpu_max,\r\n p_diff_acc,\r\n c_cpu_max,\r\n c_gpu_max,\r\n c_diff_acc,\r\n cpu_diff_acc,\r\n gpu_diff_acc]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')\r\n\r\n #Create table\r\n plt.clf()\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_acc.png')", "def _update_table(self):\n self._ts_info_table.clear()\n y_list = self._parameter_root['y_axis'].value_names\n self._ts_info_table.setRowCount(len(y_list))\n self._ts_info_table.setColumnCount(3)\n row_labels = ['y(t0)', 'y(t1)', 'y(t1)-y(t0)']\n self._ts_info_table.setHorizontalHeaderLabels(row_labels)\n self._ts_info_table.setVerticalHeaderLabels(y_list)\n t0_nearest, t0_ind = self._get_t_value(\n self._parameter_root['t0'].value)\n t1_nearest, t1_ind = self._get_t_value(\n self._parameter_root['t1'].value)\n\n self._update_t(t0_nearest, t1_nearest)\n\n for ind, y_name in enumerate(y_list):\n self._update_y(ind, y_name, t0_ind, t1_ind)\n self._ts_info_table.resizeColumnsToContents()", "def draw_cells(self):\n pass", "def make_plot(x,y):", "def data_table(self, X, y, models_predictions):\n models_predictions = assess_models_names(models_predictions)\n base_color = self.plot_design.base_color_tints[0]\n\n # formatter for y and prediction columns to color and style them separately\n cols = [TableColumn(\n field=y.name,\n title=y.name,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=base_color))\n )]\n\n # predictions\n _ = []\n i = 0\n for model, predictions in models_predictions:\n if i == 0:\n color = self.plot_design.models_color_tuple[0]\n i += 1\n else:\n color = self.plot_design.models_color_tuple[1]\n\n predictions = pd.Series(predictions, name=model).round(6)\n _.append(predictions)\n cols.append(\n TableColumn(\n field=model,\n title=model,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=color)))\n )\n\n for col in X.columns:\n cols.append(TableColumn(field=col, title=col))\n scores = pd.DataFrame(_).T # by default, wide table is created instead of a long one\n\n # final DataFrame and DataTable\n df = pd.concat([y, scores, X], axis=1)\n source = ColumnDataSource(df)\n dt = DataTable(source=source, columns=cols, editable=False, sizing_mode=\"stretch_width\")\n\n return dt", "def draw_timing_table(timing_data):\n text = 'Job | Arrival Time | CPU Cycle | Finish Time | Turn. Time | Wait Time\\n'\n text = text + '=====================================================================\\n'\n total_cpu = 0\n total_ft = 0\n total_tt = 0\n total_wt = 0\n for row in timing_data:\n text += '%3s | %12s | %9s | %11s | %10s | %9s\\n' % (\n row[\"Job\"],\n row[\"Arrival Time\"],\n row[\"CPU Cycle\"],\n row[\"Finish Time\"],\n row[\"Turnaround Time\"],\n row[\"Wait Time\"],\n )\n total_cpu = total_cpu + row[\"CPU Cycle\"]\n total_ft = total_ft + row[\"Finish Time\"]\n total_tt = total_tt + row[\"Turnaround Time\"]\n total_wt = total_wt + row[\"Wait Time\"]\n\n text = text + \"---------------------------------------------------------------------\\n\"\n text = text + 'Total: %9d | %11d | %10d | %9d\\n' % (\n total_cpu,\n total_ft,\n total_tt,\n total_wt,\n )\n text = text + 'Average: %9f | %11f | %10f | %9f\\n' % (\n total_cpu / len(timing_data),\n total_ft / len(timing_data),\n total_tt / len(timing_data),\n total_wt / len(timing_data),\n )\n text = text + '=====================================================================\\n'\n print(text)", "def on_scatter_toolbar_table_click(self):\n #print('*** on table click ***')\n row = self.myTableWidget.currentRow()\n if row == -1 or row is None:\n return\n yStat = self.myTableWidget.item(row,0).text()\n self.myParent.replot()", "def render_table(self,data,col_width=1.9,row_height=0.625,font_size=14,\n header_color='#40466e',row_colors=['#f1f1f2','w'],edge_color='#f1f1f2',\n bbox=[0,0,1,1],header_columns=0,ax=None,**kwargs):\n if ax is None:\n #计算画板大小\n size=(np.array(data.shape[::-1])+np.array([0,1]))*np.array([col_width,row_height])\n fig,ax=plt.subplots(figsize=size)\n #关闭坐标轴\n ax.axis('off')\n #绘制表格 \n mpl_table=ax.table(cellText=data.values,bbox=bbox,colLabels=data.columns,**kwargs)\n #关闭文字设置\n mpl_table.auto_set_font_size(False)\n #设置文字大小\n mpl_table.set_fontsize(font_size)\n #遍历所有单元格\n for k,cell in six.iteritems(mpl_table._cells):\n #设置格子边框\n cell.set_edgecolor(edge_color)\n #判断是否为列标题\n if k[0]==0 or k[1]<header_columns:\n #文字加粗,颜色为白色\n cell.set_text_props(weight='bold',color='w')\n #设置边框颜色\n cell.set_facecolor(header_color)\n else:\n #设置单元格颜色\n cell.set_facecolor(row_colors[k[0]%len(row_colors)])\n #返回画板对象 \n return fig", "def paint_cells(self, data):\r\n if len(data) == 0: return\r\n col, row = zip(*data.keys())\r\n colors = tuple(data.values())\r\n if not isinstance(colors[0], Number):\r\n colors = [self.cdict[color] for color in colors] \r\n self.A[row, col] = colors\r\n self.plot()", "def task_table_plot(task_data):\n groups = task_data.Group.values\n task_no_group = task_data.drop('Group', axis=1)\n nrows, ncols = task_no_group.shape\n width, height = 1.0 / ncols, 1.0 / nrows\n\n fig, ax = plt.subplots(figsize=(1, nrows*0.25))\n ax.set_axis_off()\n tbl = Table(ax)\n tbl.auto_set_font_size(False)\n # Columns width for non-auto-width columns\n col_widths = [1, 1, 0.5, 1, 0.7, 0.7, 0.7, 0.7, 0.7]\n palette = get_palette()\n fontcolor = 'w'\n for (i, j), val in np.ndenumerate(task_no_group):\n fc = palette[groups[i]]\n fontsize = 10\n if j < 2:\n loc = 'left'\n font_family = None\n if j == 0:\n fontsize = 9\n else:\n loc = 'center'\n #font_family = 'DINPro'\n if j > 3:\n fontsize = 9\n tbl.add_cell(i, j, col_widths[j], height, text=val,\n loc=loc, facecolor=fc, edgecolor=fontcolor)\n cell = tbl.get_celld()[(i, j)]\n cell.set_linewidth(0.5)\n cell.set_text_props(color=fontcolor, family=font_family, weight='bold', fontsize=fontsize)\n\n # Column Labels...\n for j, label in enumerate(task_no_group.columns):\n tbl.add_cell(-1, j, col_widths[j], height*0.8, text=label, loc='center',\n facecolor='gray', edgecolor='w')\n cell = tbl.get_celld()[(-1, j)]\n cell.set_linewidth(0.5)\n cell.set_text_props(color=fontcolor, weight='bold', family='Verdana', fontsize=9)\n\n tbl._autoColumns = [0, 1]\n tbl.scale(1, 1.5) # scale y to cover blank in the bottom\n ax.add_table(tbl)\n ax.margins(0, 0)\n fig.savefig('img/task_table', bbox_inches='tight', pad_inches=0.1, dpi=200)", "def plotly_table(df: pd.DataFrame):\n table_data = [df[x] for x in list(df.columns)]\n table_data.insert(0, df.index.to_list())\n fig = go.Figure(data=[go.Table(header=dict(values=[\"index\"]+list(df.columns)),\n cells=dict(values=table_data))])\n return fig", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Going to a nonchunkadmin URL should be ok, and should also put the `_data_changed` parameter onto the URL.
def test_to_other_url(self): user = User(username='test', is_staff=True, is_superuser=True, is_active=True) user.set_password('test') user.full_clean() user.save() request = RequestFactory().get('/') response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/') admin_instance = get_modeladmin(Iframe) new_response = admin_instance.maybe_fix_redirection( request=request, response=response_302, obj=user) self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa self.assertEqual(302, new_response.status_code) self.assertEqual('/admin_mountpoint/?_data_changed=1', new_response['Location'])
[ "def test_return_admin_url(self):\n\t\tself.assertEqual(admin_handler.AdminHandler().return_admin_url(''), 'http://sport1_admin.app.endstand.de')\n\t\tself.assertEqual(admin_handler.AdminHandler().return_admin_url('ergebnisDienst'), 'http://master.dynamic.ergebnis-dienst.de')", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def model_url_apod_changed(self, data):\n self.__view.update_url(data)", "def test_user_change_page(self):\n # run\n url = reverse('admin:core_user_change', args=[self.simple_user.id])\n resp = self.client.get(url)\n # assert\n self.assertEqual(resp.status_code, 200)", "def get(self):\n self.redirect('/admin')", "def module_views():\n check_admin()\n return redirect(url_for('home.homepage'))", "def test_not_logged_cannot_update_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def _visit_plugin_admin_page(self):\n\t\tself._blog.make_admin_request(\n\t\t\turl_join(self._blog.admin_page_url, self._PLUGIN_PAGE_URL))", "def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def test_has_change_permission_returns_false_for_invalid_request(self):\n model_admin = MenuItemAdmin(MenuItem, admin.AdminSite())\n request = RequestFactory().get(\"/admin\")\n request.user = self.get_superuser()\n self.assertFalse(model_admin.has_change_permission(request))", "def test_logentry_get_admin_url(self):\n logentry = LogEntry.objects.get(content_type__model__iexact=\"article\")\n expected_url = reverse(\n \"admin:admin_utils_article_change\", args=(quote(self.a1.pk),)\n )\n self.assertEqual(logentry.get_admin_url(), expected_url)\n self.assertIn(\"article/%d/change/\" % self.a1.pk, logentry.get_admin_url())\n\n logentry.content_type.model = \"nonexistent\"\n self.assertIsNone(logentry.get_admin_url())", "def url(update, context):\n update.message.reply_text('Working...')\n user = update.message.from_user\n url = update.message.text\n link = call_backend(url)\n\n store_stats(user, url, link)\n update.message.reply_text('Short link: {}'.format(link))", "def test_partial_update_website(self):\n pass", "def endpoint(request):\n try:\n if request.method == \"GET\":\n return changes_GET(request)\n else:\n return HttpResponseNotAllowed([\"GET\"])\n except:\n return utils.exception_response()", "def test_file_upload_page_shows(self):\n url = reverse(\"comicmodels.views.upload_handler\",\n kwargs={\"site_short_name\":self.testproject.short_name})\n self._test_url_can_be_viewed(self.root,url) \n #self._test_url_can_be_viewed(self.root.username,url)", "def handle_publish_admin(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If `_autoclose` is in the URL, that + `_data_changed` should propagate to the next redirect URL for the purposes of our adminlinks JS.
def test_autoclose_chunkadmin(self): user = User(username='test', is_staff=True, is_superuser=True, is_active=True) user.set_password('test') user.full_clean() user.save() admin_instance = get_modeladmin(Iframe) self.assertIsInstance(admin_instance, RealishAdmin) request = RequestFactory().get('/', { '_autoclose': 1, }) request.user = user iframe_admin = reverse('admin:embeds_iframe_add') response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin) ct = get_content_type(User) iframe = Iframe(position=2, region='test', content_type=ct, content_id=user.pk, url='https://news.bbc.co.uk/') iframe.full_clean() iframe.save() new_response = admin_instance.maybe_fix_redirection( request=request, response=response_301, obj=iframe) self.assertEqual(new_response['X-Chunkadmin-Response'], 'autoclose') self.assertEqual(301, new_response.status_code) location, querystring = new_response['Location'].split('?') self.assertEqual('/admin_mountpoint/embeds/iframe/add/', location) self.assertIn('region=test', querystring) self.assertIn('_data_changed=1', querystring) self.assertIn('_autoclose=1', querystring) self.assertIn('content_type={0}'.format(ct.pk), querystring) self.assertIn('content_id={0}'.format(iframe.pk), querystring)
[ "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def URL_redirect():\n # query db for long url\n # whenever this short URL is clicked on, redirect to a long URL\n # count click rate\n return redirect(\"/<long_url>\")", "def model_url_apod_changed(self, data):\n self.__view.update_url(data)", "def response_post_save_change(self, request, obj):\n return HttpResponseRedirect(obj.get_absolute_url())", "def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def redirect_on_exit_url(self, redirect_on_exit_url):\n\n self._redirect_on_exit_url = redirect_on_exit_url", "def handle_cancel(self, action, data):\n if not self._next_url:\n self._next_url = url.absoluteURL(self.__parent__, self.request)\n self.request.response.redirect(self._next_url)", "def urlback(self, urlback):\n\n self._urlback = urlback", "def log_url_spide(self):\n pass", "def persist_url(self, url, shortened_url):\n # some_function_that_saves_if_dont_exists_already(url, shortened_url)\n pass", "def response_post_save_change(self, request, obj):\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)", "def response_post_save_add(self, request, obj):\n return HttpResponseRedirect(obj.get_absolute_url())", "def handle_ledger_closed(self, data):\n if data.ledger_index > self.status.ledger_index:\n self.status.ledger_index = data.ledger_index\n self.status.ledger_time = data.ledger_time\n self.status.reserve_base = data.reserve_base\n self.status.reserve_inc = data.reserve_inc\n self.status.fee_base = data.fee_base\n self.status.fee_ref = data.fee_ref\n self.emitter.emit('ledger_closed', data)", "def _after_response(self):\n pass", "def redirect(url):", "def setAutoClose(self, state):\r\n data = self.getData()\r\n data['auto_close'] = state\r\n self.updateData(data)", "def fix_backlinks(self,req):\n for p in self.list(stage='posted',kind='page'):\n p.store_backlinks()\n req.message='backlinks for all pages updated'\n return self.redirect(req)", "def close(request, post, **kwargs):\n user = request.user\n Post.objects.filter(uid=post.uid).update(status=Post.CLOSED)\n # Generate a rationale post on why this post is closed.\n rationale = mod_rationale(post=post, user=user,\n template=\"messages/closed.md\")\n msg = \"closed\"\n url = rationale.get_absolute_url()\n messages.info(request, mark_safe(msg))\n auth.db_logger(user=user, text=f\"{msg}\", post=post)\n return url", "def shorten_urls(self):\n\n # only bother processing if we have a connection\n if self.has_connection:\n data = self.check_clipboard()\n\n if data and data != self.data:\n logger.info('Found new clipboard data to process.')\n logger.debug('Old data: %s' % (self.data, ))\n logger.debug('New data: %s' % (data, ))\n self.process_clipboard(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
if continue editing is hit, it should go back to the parent URL, I think?
def test_continue_editing_parent_object(self): user = User(username='test', is_staff=True, is_superuser=True, is_active=True) user.set_password('test') user.full_clean() user.save() admin_instance = get_modeladmin(Iframe) self.assertIsInstance(admin_instance, RealishAdmin) request = RequestFactory().get('/', { '_continue': 1, }) request.user = user iframe_admin = reverse('admin:embeds_iframe_add') response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin) ct = get_content_type(User) iframe = Iframe(position=2, region='test', content_type=ct, content_id=user.pk, url='https://news.bbc.co.uk/') iframe.full_clean() iframe.save() new_response = admin_instance.maybe_fix_redirection( request=request, response=response_301, obj=iframe) self.assertEqual(new_response['X-Chunkadmin-Response'], 'redirect-to-parent') self.assertEqual(301, new_response.status_code) self.assertEqual('/admin_mountpoint/auth/user/1/?_data_changed=1', new_response['Location'])
[ "def view_edit_no_id():\n return redirect(url_for('home.homepage'))", "def test_redirect_from_edit_exists(self):\n response = self.client.get('/edit/', follow=True)\n self.assertEqual(response.status_code, 200)", "def response_post_save_change(self, request, obj):\n return HttpResponseRedirect(obj.get_absolute_url())", "def test_edit_page_redirects(self):\n response = self.client.get('/edit/', follow=True)\n last_redirect = response.redirect_chain[-1][0]\n self.assertTrue(\"/accounts/login/?next=/edit/\" in last_redirect)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def get_edit_url(self):\n return ''", "def test_edit_view(self):\n target_url = url_for('content.edit_content')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def cancel(self, **params):\n if params[\"edit_mode\"]:\n blog = self.get_post_from_cookie()\n self.redirect(\"/blog/{}\".format(blog.key.id()))\n else:\n self.redirect(\"/blog/\")", "def test_menuitem_add_view_redirects_on_save_continue(self):\n menu_content = factories.MenuContentWithVersionFactory()\n add_url = reverse(\n \"admin:djangocms_navigation_menuitem_add\", args=(menu_content.id,)\n )\n content_type = ContentType.objects.get(app_label=\"cms\", model=\"page\")\n page = factories.PageContentFactory().page\n data = {\n \"title\": \"My new Title\",\n \"content_type\": content_type.pk,\n \"object_id\": page.pk,\n \"_ref_node_id\": menu_content.root.id,\n \"numchild\": 1,\n \"link_target\": \"_blank\",\n \"_position\": \"first-child\",\n \"_continue\": ['Save and continue editing'],\n }\n\n response = self.client.post(add_url, data)\n new_child = MenuItem.objects.exclude(pk=menu_content.root.pk).get()\n\n self.assertEqual(\n response.url,\n reverse(\n \"admin:djangocms_navigation_menuitem_change\",\n kwargs={\"menu_content_id\": menu_content.pk, \"object_id\": new_child.id},\n )\n )\n\n response = self.client.post(add_url, data, follow=True)\n child = MenuItem.objects.exclude(pk__in=[menu_content.root.pk, new_child.id]).get()\n msg = _('Menuitem %(menuitem)s was changed successfully. You can edit it below') % {'menuitem': child.id}\n\n self.assertContains(response, msg)", "def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)", "def openEditPage(self):\n self.controller.showFrame('EditPage')", "def start_editing(self):\r\n if self._mode is None:\r\n self._mode = 'edit'\r\n params = {\r\n 'f' : 'json',\r\n 'sessionID' : self._guid\r\n }\r\n url = \"%s/startEditing\" % self._url\r\n res = self._con.post(url, params)\r\n return res['success']\r\n return False", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. \n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def editLink(self):\r\n if not self.securityManager.checkPermission('Modify portal content', self.context):\r\n return None\r\n if self.contextState.is_locked():\r\n return self.context.absolute_url() + \"/@@cmsui-lock-info\"\r\n objectActions = self.contextState.actions('object')\r\n for action in objectActions:\r\n if action['id'] == self.settings.editActionId:\r\n return \"%s?last_referer=%s\" % (action['url'], self.context.absolute_url())\r\n return None", "def on_base_bt_edit_goback_clicked(self, widget, data=None):\n if not self._check_state(DataState.INSERTING, DataState.EDITING):\n super(WizardController, self).on_base_bt_edit_goback_clicked(widget)\n else:\n self.prev()", "def after_successful_edit(self):\n pass", "def redirect_parent(context, request):\n comments = find_interface(context, ICommentsFolder)\n return HTTPFound(location=request.resource_url(comments.__parent__, anchor='comments'))", "def edit_self(self):\n if self.id:\n return '<a href=\"%s\">' \\\n 'Edit Page</a>' % admin_url(self.__class__, \"change\",\n self.id)\n return ''", "def response_post_save_add(self, request, obj):\n return HttpResponseRedirect(obj.get_absolute_url())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate immediate (different by one mismatch) neighbours of the given genome pattern
def _generate_immediate_neighbours(pattern: str) -> list: generated = [] for i in range(len(pattern)): if pattern[i] == 'A': generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A]) elif pattern[i] == 'C': generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C]) elif pattern[i] == 'T': generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T]) elif pattern[i] == 'G': generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G]) return generated
[ "def generate_neighbors_8_connect(current_cell, array_width):\n x_coordinate = current_cell / array_width\n y_coordinate = current_cell % array_width\n\n if current_cell == 0:\n west_neighbor = -1\n north_neighbor = -1\n north_west_neighbor = -1\n north_east_neighbor = -1\n\n elif x_coordinate == 0:\n north_neighbor = -1\n north_west_neighbor = -1\n north_east_neighbor = -1\n west_neighbor = (array_width * x_coordinate) + (y_coordinate - 1)\n\n elif y_coordinate == 0:\n west_neighbor = -1\n north_west_neighbor = -1\n north_neighbor = (array_width * (x_coordinate - 1)) + y_coordinate\n north_east_neighbor = (array_width * (x_coordinate - 1)) + \\\n (y_coordinate + 1)\n\n elif y_coordinate == array_width - 1:\n west_neighbor = (array_width * x_coordinate) + (y_coordinate - 1)\n north_neighbor = (array_width * (x_coordinate - 1)) + y_coordinate\n north_west_neighbor = (array_width * (x_coordinate - 1)) + \\\n (y_coordinate - 1)\n north_east_neighbor = -1\n\n else:\n west_neighbor = (array_width * x_coordinate) + (y_coordinate - 1)\n north_neighbor = (array_width * (x_coordinate - 1)) + y_coordinate\n north_west_neighbor = (array_width * (x_coordinate - 1)) + \\\n (y_coordinate - 1)\n north_east_neighbor = (array_width * (x_coordinate - 1)) + \\\n (y_coordinate + 1)\n\n return west_neighbor, north_west_neighbor, north_neighbor, \\\n north_east_neighbor", "def neighbours(self):\n for y in range(self.y - 1, self.y + 2):\n for x in range(self.x - 1, self.x + 2):\n if self.x != x or self.y != y:\n neighbour = self.g.at(x, y)\n if neighbour:\n yield neighbour", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def generateNeighbor(self, aSolution, neighborhoodSize):", "def seq_neighborhood(seq, n_subs=1):\n for positions in combinations(range(len(seq)), n_subs):\n # yields all unique combinations of indices for n_subs mutations\n for subs in product(*(\"ATGCN\",)*n_subs):\n # yields all combinations of possible nucleotides for strings of length\n # n_subs\n seq_copy = list(seq)\n for p, s in zip(positions, subs):\n seq_copy[p] = s\n yield ''.join(seq_copy)", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def neighbors_generator(state: str, nurses_number=10) -> str:\n\n genes = 21 * nurses_number\n\n # Random index to change and generated the neighbor\n index = randrange(0, genes)\n\n # Here we're taking the first part of the state before the bit that will be modified\n new_state = state[0:index]\n\n # Here is modified the bit\n if state[index] == '0':\n new_state += '1'\n else:\n new_state += '0'\n\n # Here we're taking the last part of the state passed\n new_state += state[index+1:]\n\n # Here is returned the new state and the next bit to be modified\n return new_state", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def get_neighbours(self) -> Generator['Position', None, None]:\n for dc in range(-1, 2):\n for dy in range(-1, 2):\n if dc != 0 or dy != 0:\n p = self + Vector2(dc, dy)\n if p.is_valid():\n yield p", "def get_neighbours(state):\n row, col = state\n return [\n (row, col), # -\n (row - 1, col), # N\n (row, col + 1), # E\n (row + 1, col), # S\n (row, col - 1), # W\n ]", "def find_neighbours(engine, field, features):\n code = CodeSegment(engine)\n N = len(engine.q)\n Nf = 3 ** engine.pm.ndim\n code.assign(x=Literal(numpy.zeros((N, Nf))), y='features')\n grid = engine.pm.generate_uniform_particle_grid(shift=0)\n for i in range(Nf):\n ii = i\n a = []\n for d in range(engine.pm.ndim):\n a.append(ii % 3 - 1)\n ii //= 3\n\n grid1 = grid + numpy.array(a[::-1]) * (engine.pm.BoxSize / engine.pm.Nmesh)\n layout = engine.pm.decompose(grid1)\n code.readout(x=Literal(grid1), mesh='field', value='feature1', layout=Literal(layout), resampler='nearest')\n code.assign_component(attribute='features', value='feature1', dim=i)\n return code", "def neighbours_of(self, pos):\n return [pos - 64, pos - 1, pos + 1, pos + 64]", "def find_pattern(pattern, genome):\n\n tens_table = [pow(10, m) for m in xrange(len(pattern))]\n hash_pattern = get_hash(pattern, tens_table)\n index = []\n for current_index in xrange(len(genome) - len(pattern) + 1):\n\t\tif current_index == 0:\n\t\t\tcurrent_hash = get_hash(genome[0:len(pattern)], tens_table)\n\t\telse:\n\t\t\tcurrent_hash = ((current_hash - (nucleotide_value_map[genome[current_index-1]] * tens_table[len(pattern)-1])) * 10 + nucleotide_value_map[genome[current_index-1+len(pattern)]])\n if current_hash == hash_pattern:\n index.append(current_index)\n return index", "def getNeighbours(image,i,j):\n nbg = []\n for k in arange(i-1, i+2):\n for l in arange(j-1, j+2):\n try:\n nbg.append(image[k,l])\n except BaseException as e:\n print (e)\n return array(nbg)", "def neighbors(pattern, d):\n if d == 0:\n return pattern\n\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n\n neighborhood = []\n\n # ##########\n # We can use recursion to successively compute neighbors(suffix(pattern), d),\n # where suffix(pattern) = pattern[1:]\n #\n # The reason being: if we have neighbors(suffix(pattern, d)), then we know\n # that the Hamming Distance between `pattern` and `suffix(pattern)` is either equal\n # to d or less than d.\n #\n # In the first case, we can add `pattern[0]` to the beginning of\n # `suffix(pattern)` in order to obtain a k-mer belonging to\n # Neighbors(Pattern, d). In the second case, we can add any symbol\n # to the beginning of `suffix(pattern)` and obtain a k-mer belonging\n # to Neighbors(Pattern, d).\n # ##########\n\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n\n for i in range(len(suffix_neighbors)):\n\n neighboring_pattern_text = suffix_neighbors[i]\n\n if hamming_distance(suffix_pattern, neighboring_pattern_text) < d:\n for n in _NUCLEOTIDES:\n neighborhood.append(n + neighboring_pattern_text)\n\n else:\n neighborhood.append(pattern[0] + neighboring_pattern_text)\n\n return neighborhood", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def rand_neumann(matrix, i, j, offspring):\n\n neighbors = []\n neighbors_inds = []\n try:\n if not((i - 1) < 0):\n if offspring[i-1][j] > 0:\n neighbors.append(matrix[i-1][j])\n neighbors_inds.append((i-1, j))\n except:\n pass\n\n\n try:\n if offspring[i][j-1] > 0:\n neighbors.append(matrix[i][j-1])\n neighbors_inds.append((i, j-1))\n except:\n pass\n\n\n try:\n if offspring[i+1][j] > 0:\n neighbors.append(matrix[i+1][j])\n neighbors_inds.append((i+1, j))\n\n except:\n pass\n\n try:\n if offspring[i][j+1] > 0:\n neighbors.append(mat[i][j+1])\n neighbors_inds.append((i, j+1))\n except:\n pass\n\n return neighbors_inds" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the given card matches this card
def is_match(self, card): return self.suit == card.suit or self.value == card.value
[ "def _validate_card_match(self, chosen_card, active_card, active_suit):\n\t\treturn chosen_card.is_match(active_card) or chosen_card.suit == active_suit", "def equals(self, card):\n ERROR = \"card '{}' is not a Card type\".format(card)\n assert isinstance(card, Card), ERROR\n\n if (self.getRank() == card.getRank() and \n self.getSuit() == card.getSuit()):\n return True\n return False", "def __eq__(self, card):\n return self.value == card.value and self.suit == card.suit", "def __eq__(self, other_card):\n if self.rank == other_card.rank or self.suit == other_card.suit:\n return True\n else:\n return False", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def check_card(card1, card2):\r\n\r\n num1 = card1.split(' ')[0]\r\n num2 = card2.split(' ')[0]\r\n\r\n if num1 == num2:\r\n return True\r\n else:\r\n return False", "def __eq__(self, card2):\n return self.suit == card2.suit and self.rank == card2.rank", "def has_card(ck_card):\r\n for card in my_hand:\r\n if ck_card == card:\r\n return True # possible is in list\r\n return False # Not in list\r", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def __eq__(self, other: Card) -> bool:\n return compare_map[self.number] == compare_map[other.number]", "def compare_cards(self, card1, card2, lead_card=None):\n if self.lead_card is not None:\n lead_card = self.lead_card\n if card1.symbol == card2.symbol:\n return card1.value > card2.value\n elif card1.symbol == self.trump_symbol:\n return True\n elif card2.symbol == self.trump_symbol:\n return False\n elif card1.symbol == lead_card.symbol:\n return True\n elif card2.symbol == lead_card.symbol:\n return False\n else:\n #print('not good loc')\n return False", "def has_all_cards(self, cards):\n return self.available_cards_num == sum([c.number for c in cards])", "def _is_valid_card(self, played_card):\n if not self.cards_on_table.cards:\n return True\n if self.cards_on_table[0].same_suit_or_trumpf(played_card):\n return True\n if not self._same_suit_in_cards(self.cards_on_table[0]):\n return True\n\n return False", "def __eq__(self, other):\n return (isinstance(other,Card) and\n (self.suit, self.rank) == (other.suit, other.rank))", "def validate_card(card_in_play, hand, card_choice_index):\n card_choice = hand[card_choice_index]\n\n if card_choice == \"Draw\":\n return True\n elif card_in_play[0] == card_choice[0]: # color\n return True\n elif card_in_play[1] == card_choice[1]: # number\n return True\n else: # invalid card\n return False", "def _same_suit_in_cards(self, check_card):\n return any(check_card.same_suit_or_trumpf(card) for card in self.turn.cards)", "def playersHaveEqualCards(self):\n numOfCards = self.players[0].numberOfCards()\n if numOfCards == 0: return False\n for player in self.players:\n if numOfCards != player.numberOfCards(): return False\n return True", "def __contains__(self, obj) -> bool:\n for card in self._cards:\n if isinstance(obj, str):\n if card.name == obj:\n return True\n elif card == obj:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures that chosen_card is an acceptable match, given the active_card and active_suit
def _validate_card_match(self, chosen_card, active_card, active_suit): return chosen_card.is_match(active_card) or chosen_card.suit == active_suit
[ "def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def test_choose_other_card(self):\n self.center.hit(None)\n self.center.chose_other_card(self.bakery)\n self.assertEqual(self.center.trade_other, self.bakery)\n self.assertEqual(self.center.trade_owner, None)\n self.assertEqual(self.game.state_cards, [self.center])", "def ai_choose_card(turn: 'Turn'):\n\n sorted_opp = dict(sorted(turn.active.opp_choices.items(), key=lambda x: x[0]._sort_order))\n for opp, cards in sorted_opp.items():\n # Since the set of remembered cards is not empty\n # filtering out the None's will keep all matching indexes\n matches = [\n *itt.filterfalse(lambda x: x is None, map(turn.active.hand.has_match, cards))]\n if matches:\n # Set turn state values and return\n turn.opponent = opp\n turn.wanted_card = turn.active.hand.stack[matches.pop()]\n return turn\n if not turn.wanted_card:\n turn.wanted_card = random.choice(turn.active.hand.stack)\n return turn", "def test_choose_own_card_twice(self):\n self.center.hit(None)\n self.center.chose_own_card(self.wheat)\n self.assertEqual(self.center.trade_owner, self.wheat)\n self.assertEqual(self.center.trade_other, None)\n self.assertEqual(self.game.state_cards, [self.center])\n self.center.chose_own_card(self.ranch)\n self.assertEqual(self.center.trade_owner, self.ranch)\n self.assertEqual(self.center.trade_other, None)\n self.assertEqual(self.game.state_cards, [self.center])", "def test_choose_own_card(self):\n self.center.hit(None)\n self.center.chose_own_card(self.wheat)\n self.assertEqual(self.center.trade_owner, self.wheat)\n self.assertEqual(self.center.trade_other, None)\n self.assertEqual(self.game.state_cards, [self.center])", "def validate_card(card_in_play, hand, card_choice_index):\n card_choice = hand[card_choice_index]\n\n if card_choice == \"Draw\":\n return True\n elif card_in_play[0] == card_choice[0]: # color\n return True\n elif card_in_play[1] == card_choice[1]: # number\n return True\n else: # invalid card\n return False", "def testSelectValidCards_OutSuit(self):\n cards = [card.Card('2', card.DIAMONDS),\n card.Card('2', card.CLUBS),\n card.Card('3', card.SPADES),\n card.Card('4', card.CLUBS),\n card.Card('4', card.HEARTS)]\n hand1 = hand.Hand()\n\n for c in cards:\n hand1.add_card(c)\n\n selected_cards = rules.select_valid_cards(\n card.CLUBS, hand1, [card.Card('K', card.HEARTS)])\n for c in cards:\n self.assertTrue(c in selected_cards)", "def testSelectValidCards_InSuit(self):\n in_suit_cards = [card.Card('2', card.CLUBS),\n card.Card('3', card.CLUBS),\n card.Card('4', card.CLUBS)]\n out_suit_cards = [card.Card('2', card.DIAMONDS),\n card.Card('4', card.HEARTS)]\n hand1 = hand.Hand()\n # Mix up the cards just in case.\n hand1.add_card(out_suit_cards[0])\n for c in in_suit_cards:\n hand1.add_card(c)\n hand1.add_card(out_suit_cards[1])\n\n selected_cards = rules.select_valid_cards(card.CLUBS, hand1,\n [card.Card('K', card.CLUBS)])\n for c in in_suit_cards:\n self.assertTrue(c in selected_cards)\n for c in out_suit_cards:\n self.assertFalse(c in selected_cards)", "def select_best_card(self, suit, cards_played):\n # For now let's make it really dumb.\n\n # Just pick a random valid card.\n return random.choice(\n rules.select_valid_cards(suit, self.player.hand,\n cards_played))", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def testSelectValidCards_InSuitMissing(self):\n cards = [card.Card('2', card.DIAMONDS),\n card.Card('2', card.CLUBS),\n card.Card('3', card.CLUBS),\n card.Card('4', card.CLUBS),\n card.Card('4', card.HEARTS)]\n hand1 = hand.Hand()\n\n for c in cards:\n hand1.add_card(c)\n\n selected_cards = rules.select_valid_cards(\n card.CLUBS, hand1, [card.Card('K', card.SPADES)])\n for c in cards:\n self.assertTrue(c in selected_cards)", "def test_scoring_card():\n r = ScoringCard(\"YYYBBB (3)\")\n assert r.victory_point == 3\n assert r.target == Resource(\"BBBYYY\")\n\n assert r.check_enough(Resource(\"YYBB\")) is False\n assert r.check_enough(Resource(\"YYYYBBBBG\")) is True", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def validate_play(game, player, card):\n if not card in player.hand.cards:\n return \"Cards must be in the passing players hand.\"\n # if player has suit, they must match it. if they don't have suit, play anything and break hearts\n if len(game.gamespace[GAME_PLAYED_CARDS]) != 0:\n hand = player.hand\n lead = game.gamespace[GAME_PLAYED_CARDS][0]\n for c in hand:\n if lead.suit == c.suit and c.suit != card.suit:\n return \"If you can match the lead of the trick, you must do so.\"\n return \"\"", "async def wait_for_card(self, game):\r\n\r\n # Get all the valid cards that can be current chosen\r\n valid_cards = game.get_valid_cards()\r\n\r\n # Check if the player is an AI, choose a random valid card\r\n if self.is_ai:\r\n await sleep(2)\r\n return choice(valid_cards)\r\n\r\n # The player is a real person\r\n else:\r\n\r\n # Send the user a message asking which card they want to place down\r\n # as long as it's a valid card\r\n message = await self.member.send(\r\n embed = Embed(\r\n title = \"Your Turn!\",\r\n description = \"{}\\n{}\".format(\r\n \"Current Card: <{}>\".format(game.card),\r\n \"Your Cards: {}\".format(\" \".join([\r\n \"<{}>\".format(card) for card in self.cards\r\n ]))\r\n ),\r\n colour = await get_embed_color(self.member)\r\n )\r\n )\r\n\r\n # Add the valid cards as reactions to the message\r\n # and ask the user to choose a valid card\r\n # add the LEAVE reaction too in case the player wants to leave the game\r\n for card in valid_cards:\r\n await message.add_reaction(card)\r\n await message.add_reaction(LEAVE)\r\n reaction, user = await game.bot.wait_for(\"reaction_add\", check = lambda reaction, user : (\r\n reaction.message.id == message.id and\r\n user.id == self.member.id and\r\n (str(reaction).replace(\"<\", \"\").replace(\">\", \"\") in valid_cards or str(reaction) == LEAVE)\r\n ))\r\n if str(reaction) not in [ DRAW_UNO, LEAVE ]:\r\n self.cards.remove(str(reaction).replace(\"<\", \"\").replace(\">\", \"\"))\r\n return str(reaction).replace(\"<\", \"\").replace(\">\", \"\")", "def test_choose_player_with_sufficient_funds_with_cup_bread_bonus(self):\n self.player.has_bread_cup_bonus = True\n self.player.money = 0\n self.player2.money = 6\n self.station.hit(None)\n self.assertEqual(self.game.state_cards, [self.station])\n self.station.chose_player(self.player2)\n self.assertEqual(self.player.money, 5)\n self.assertEqual(self.player2.money, 1)\n self.assertEqual(self.game.state_cards, [])\n self.assertEqual(self.game.state, self.game.STATE_PURCHASE_DECISION)", "def bot_decide_card(bot_state):\n cards = bot_state.allowed_cards\n if len(cards):\n # print(\"Cnt of Allowed Cards:\", len(cards))\n cards.sort(key=compare)\n for card_val in cards:\n # print(\"Allowed Card\", card_val)\n number, category = card_val.split(\" of \")\n if number in ['10', '11', '12', '13']:\n print(\"Special Card Available, hence Played\")\n return card_val\n color = max_freq_color_in_allowed_cards(bot_state)\n print(\"Most Frequent Color is\", color)\n frq_of_numbers = {}\n max_freq = 0\n card_play_val = None\n for card_val in cards:\n number, category = card_val.split(\" of \")\n if category == color:\n if frq_of_numbers.get(number, None) is not None:\n frq_of_numbers[number] += 1\n else:\n frq_of_numbers[number] = 1\n if frq_of_numbers[number] > max_freq:\n max_frq = frq_of_numbers[number]\n card_play_val = card_val\n print(\"Decided Card to play is\", card_play_val)\n return card_play_val\n else:\n return \"DRAW_CARD\"", "def validate_cards(*cards):\n all_card_vals = [\"A\", \"K\", \"Q\", \"J\", \"T\"] + [str(i) for i in range(2, 10)]\n all_suit_vals = [\"H\", \"D\", \"S\", \"C\"]\n\n for card in cards:\n card = card.upper()\n\n if UNIQUE_CARD_COUNT[card] > 0:\n msg = \"{} card has been played more than once.\".format(card)\n raise ValueError(msg)\n else:\n UNIQUE_CARD_COUNT[card] += 1\n\n if not card[0] in all_card_vals:\n msg = \"'{}' is not a valid card value\".format(card[0])\n raise ValueError(msg)\n\n if not card[1] in all_suit_vals:\n msg = \"'{}' is not a valid card suit\".format(card[1])\n raise ValueError(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If test_mode is True, an image of `screen` is saved
def save_screen(screen): if not video_mode: # Don't record video return False # Make global variables writeable global current_frame global path_checked frames_directory = os.path.dirname( os.path.dirname( os.path.realpath(__file__))) + "\\frames\\" if not path_checked: check_folder(frames_directory) pygame.image.save( screen, frames_directory + "ants-frame{}.jpeg".format( str(current_frame).zfill(4))) current_frame += 1 # Move count to next frame
[ "def save_snapshot(mode='project'):\n p = viewport.get_model_panel()\n cam_name = cmds.modelEditor(p, query=True, camera=True).replace(':', '_')\n curr_file_name = cmds.file(query=True, sceneName=True, shortName=True)\n nice_file_name = '{0:%Y%m%d_%H%M%S}_{1:s}_{2:s}'.format(datetime.datetime.now(),\n curr_file_name.strip('.ma'),\n cam_name)\n\n desktop_folder = fs.get_desktop_folder()\n image_file = desktop_folder + nice_file_name + '.jpg'\n if mode == 'project':\n images_folder = fs.get_render_folder()\n image_file = images_folder + nice_file_name + '.jpg'\n elif mode == 'dialog':\n image_file = cmds.fileDialog2(cap='Save Screenshot',\n fileFilter='JPEG (*.jpg);;TIFF (*.tif)',\n startingDirectory=image_file,\n fileMode=0,\n okCaption='Save',\n dialogStyle=2)\n if image_file is not None:\n image_file = image_file[0]\n else:\n return\n print image_file\n viewport.create_viewport_snapshot(image_file)", "def isave_screenshot():\n save_screenshot(conv_s())", "def save_screenshot():\n from datetime import datetime\n from .validpath import is_pathname_valid\n from desktopmagic.screengrab_win32 import getScreenAsImage\n\n img = getScreenAsImage()\n imgfp = image_fp(datetime.now())\n assert is_pathname_valid(str(imgfp)), \\\n \"Final image filename is not a valid path.\"\n img.save(imgfp)\n log.debug(f\"Screenshot saved to '{imgfp}'\")\n return imgfp", "def _screen_shot(self):\n cmd = r\"{} shell screencap -p /sdcard/screen.png\"\n cmd = cmd.format(self.adb_path)\n subprocess.check_call(cmd)\n\n cmd = r\"{} pull /sdcard/screen.png {}\"\n cmd = cmd.format(self.adb_path, os.path.join(\n self.temp_path, \"temp.png\"))\n subprocess.check_call(cmd)", "def saveScreenPNG(self, filename):\n return nes_lib.saveScreenPNG(self.obj, filename)", "def capture(self):\n name = type(self).__name__\n module = sys.modules['__main__']\n path, name = os.path.split(module.__file__)\n name, ext = os.path.splitext(name)\n filename = path + '/' + name + '.png'\n pygame.image.save(self.screen, filename)", "def screen_save(self, path):\n self.scope.write(':DISK:SAVE:IMAGe \"%s\",PNG,SCR,OFF,INVERT' % path)\n self.wait()", "def visualise_save(self, imageFilename, offscreenRendering=True):\n renderer, renderWindow = self.build_renderer_and_window()\n renderer.ResetCameraClippingRange()\n renderWindow.SetOffScreenRendering(offscreenRendering)\n renderWindow.Render()\n save_snapshot(renderWindow, imageFilename)", "def screenshot(self):\n log.debug(\"Entering screenshot()...\")\n screen = '/tmp/screen.jpg'\n image = None\n\n try:\n os.system('screencapture -m %s' % screen)\n image = open(screen, 'rt').read()\n log.debug(\"Generated screenshot\")\n except:\n log.exception(\"Could not generate screen\")\n finally:\n os.remove(screen)\n \n log.debug(\"Exiting screenshot()...\")\n return image", "def screen_dump(self, white_background = False, png_fn = 'scope_screen_dump.png', full_screen = True):\n\t\tif white_background:\n\t\t\tbckg = 'WHITE'\n\t\telse:\n\t\t\tbckg = 'BLACK'\n\t\tif full_screen:\n\t\t\tarea = 'DSOWINDOW'\n\t\telse:\n\t\t\tarea = 'GRIDAREAONLY'\n\t\t# write \"hardcopy\" setup information:\n\t\tself.scope.write('COMM_HEADER OFF')\n\t\tself.scope.write('HARDCOPY_SETUP DEV, PNG, BCKG, '+bckg+', DEST, \"REMOTE\", AREA, '+area)\n\t\t# send screen dump command\n\t\tself.scope.write('SCREEN_DUMP')\n\t\t# read screen dump information: this is exactly the contents of a .png file, typically < 40 kB\n\t\tscreen_image_png = self.scope.read_raw()\n\t\t# write the .png file\n\t\tfile = open(png_fn, 'wb') # Can this be achieved without having to go to disk??\n\t\tfile.write(screen_image_png) # actually, this is not a bug, it's a feature, since we probably want the image in a file anyway\n\t\tfile.close()\n\t\t# x = mpimg.imread(png_fn)\n\t\t# (h,w,d) = numpy.shape(x)\n\t\t# plt.figure(num=None, figsize=(w/100, h/100), dpi=100, facecolor='w', edgecolor='k')\n\t\t# plt.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)\n\t\t# plt.imshow(x)", "def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()", "def save_screenshot(self, file_name, width=3840, height=2160, first=True, last=True):\n if first and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.makeCurrent()\n gr3.export(file_name, width, height)\n if last and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.doneCurrent()", "def screenshot(self, image=None, name=None):\n if name == None: name = \"unnamed\"\n if image == None: pygame.image.save(self.screen, name)\n self.screenshot_taken += 1", "def test_screenshot_show(self):\n pass", "def screens_maker(task):\n rend_type = int( task['render_type'] )\n rend_project = task['project_name']\n rend_result_dir = task['result_dir']\n file_name = p_rend_type[rend_type]['file_screen'].split( '/' )[1]\n logging.info( 'IN SCREEN Maker {}'.format( task ) )\n try:\n bpy.ops.wm.open_mainfile( filepath=rend_project )\n scn = bpy.context.scene\n scn.frame_start = 100\n scn.frame_end = 101\n bpy.data.scenes[scn.name].render.image_settings.file_format = 'JPEG'\n scn.render.filepath = '{}'.format( str( rend_result_dir ) + '/' + str( file_name ) )\n bpy.ops.render.render( write_still=True )\n try:\n os.chown( scn.render.filepath, int( u_ugid ), int( u_gguid ) )\n os.chmod( scn.render.filepath, 0o777 )\n except Exception as e:\n logging.info( 'err SCREEN MAKER rights{}'.format( str( e ) ) )\n except Exception as e:\n logging.info( 'ERR IN SCREEN Maker {}'.format( str( e ) ) )\n\n return 1", "def screen_shot(self, pic_path):\n self.run_command(f'shell screencap -p /sdcard/screen.png')\n if not path.exists(pic_path):\n self.run_command(f'pull /sdcard/screen.png {pic_path}')\n else:\n raise ADBError(f'{pic_path} already exist')\n self.run_command(f'shell rm /sdcard/screen.png')\n yield pic_path\n remove(pic_path)", "def test_screenshot_create(self):\n pass", "def get_screen_image(dir=\"screenshots\"):\n screenshot_name = dir + \"/screenshot_\" + str(random.randint(0, 1e10)) + \".png\"\n\n screenshot = autopy.bitmap.capture_screen()\n screenshot.save(screenshot_name)\n return screenshot_name", "def screenshot(self, region=None):\n with _screenshot_lock:\n hwnd_dc = win32gui.GetWindowDC(self.hwnd)\n mfc_dc = win32ui.CreateDCFromHandle(hwnd_dc)\n save_dc = mfc_dc.CreateCompatibleDC()\n\n save_bitmap = win32ui.CreateBitmap()\n save_bitmap.CreateCompatibleBitmap(mfc_dc, self.width, self.height)\n\n save_dc.SelectObject(save_bitmap)\n\n # Store the actual screenshot result here through\n # the use of the windll object.\n windll.user32.PrintWindow(self.hwnd, save_dc.GetSafeHdc(), 0)\n\n bmp_info = save_bitmap.GetInfo()\n bmp_str = save_bitmap.GetBitmapBits(True)\n\n # Store the actual Image object retrieved from our windows calls\n # in this variable.\n image = Image.frombuffer(\"RGB\", (bmp_info[\"bmWidth\"], bmp_info[\"bmHeight\"]), bmp_str, \"raw\", \"BGRX\", 0, 1)\n\n # Cleanup any dc objects that are currently in use.\n # This also makes sure when we come back, nothing is in use.\n save_dc.DeleteDC()\n mfc_dc.DeleteDC()\n\n win32gui.ReleaseDC(self.hwnd, hwnd_dc)\n win32gui.DeleteObject(save_bitmap.GetHandle())\n\n # Ensure we also remove any un-needed image data, we only\n # want the in game screen, which should be the proper emulator height and width.\n image = image.crop(box=(\n 0,\n self.y_padding,\n self.width,\n self.height,\n ))\n\n # If a region has been specified as well, we should crop the image to meet our\n # region bbox specified, regions should already take into account our expected y padding.\n if region:\n image = image.crop(box=region)\n\n # Image has been collected, parsed, and cropped.\n # Return the image now, exiting will release our lock.\n return image" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> find_good_recipes(9, 10) '5158916779' >>> find_good_recipes(5, 10) '0124515891' >>> find_good_recipes(18, 10) '9251071085' >>> find_good_recipes(2018, 10) '5941429882'
def find_good_recipes(improvement_num, count): recipes = [3, 7] elf1 = 0 elf2 = 1 while len(recipes) <= improvement_num + count: elf1_value = recipes[elf1] elf2_value = recipes[elf2] recipe_sum = elf1_value + elf2_value if recipe_sum > 9: recipe_string = f"{recipe_sum:02d}" recipes.append(int(recipe_string[:1])) recipes.append(int(recipe_string[1:])) else: recipes.append(recipe_sum) elf1 = loop_around(1 + elf1 + elf1_value, len(recipes)) elf2 = loop_around(1 + elf2 + elf2_value, len(recipes)) answer_string = "" for i in range(improvement_num, improvement_num + count): answer_string += str(recipes[i]) return answer_string
[ "def test_get_similar_recipes(self):\n pass", "def test_get_random_recipes(self):\n pass", "def search_recipe(ingredients):\n\n params = '+'.join(ingredients.split())\n url_search = SEARCH_URL.format(params)\n response = req.get(url_search)\n\n return response.content", "def extract_recipes(ingredient_list):\n query = \", \".join(ingredient_list)\n # Initiate the search\n base_url = \"http://allrecipes.com\"\n entry = base_url + \"/search/results/?wt=\" + query + \"&sort=re\"\n start_page = requests.get(entry)\n tree = html.fromstring(start_page.content)\n response = tree.xpath('//article[contains(@class, \\'grid-col--fixed-tiles\\')]//@href')\n # Extract search result links\n links = set()\n for i in xrange(min(10, len(response))):\n if \"recipe\" in str(response[i]):\n links.add(base_url + response[i])\n # Spawn workers to process each link\n futures, workers = [], []\n for link in links:\n message = {'link': link}\n actor_ref = Worker.start()\n workers.append(actor_ref)\n futures.append(actor_ref.ask(message, block=False))\n # Collect and merge worker answers\n recipes = dict()\n answers = pykka.get_all(futures)\n for answer in answers:\n recipes[answer['name']] = dict()\n recipes[answer['name']]['ingredients'] = answer['ingredients']\n recipes[answer['name']]['link'] = answer['link']\n for worker in workers:\n worker.stop()\n return recipes", "def solve1(n):\n scores = [3, 7]\n elf_i, elf_j = 0, 1\n while len(scores) < n+10:\n new_recipes(scores, elf_i, elf_j)\n elf_i = move_elf(elf_i, scores)\n elf_j = move_elf(elf_j, scores)\n return ''.join([str(x) for x in scores[n:n+10]])", "def fetch_recipes(self, query, page=1):\n if page == 1:\n self.has_additional_results = True\n query = quote(query.encode('utf8'))\n search_url = self.base_search_url.format(query=query, page=page)\n r = requests.get(search_url, headers=self.request_headers)\n if r.status_code is not 200:\n self.has_additional_results = False\n return []\n\n bs = BeautifulSoup(r.text, 'lxml')\n\n # This is to ensure we get only recipe cards\n # Otherwise, we may get article or video cards, neither of which link to recipes\n recipe_cards = bs.find_all(self.is_recipe_card)\n recipes = []\n for recipe in recipe_cards:\n name_tag = recipe.find_all('h4')[0]\n name = name_tag.text.strip()\n recipe_url = self.base_recipe_url.format(recipe_url=name_tag.find_all('a')[0]['href'])\n\n if len(recipe_url) < 1:\n continue\n\n temp_recipe = {\n 'name': titlecase(name),\n 'source_url': recipe_url\n }\n\n # Epicurious has recipe descriptions in the HTML that is usually hidden to humans by CSS\n # This is a webscraper, though, so it doesn't care that it's visually hidden to the end user\n # Still not guaranteed to have a description all the time, so I'm saving it here\n recipe_description = recipe.find_all(self.is_recipe_description)\n if len(recipe_description) > 0:\n temp_recipe['description'] = recipe_description[0].text.strip()\n\n recipes.append(temp_recipe)\n\n self.has_additional_results = len(recipes) > 0\n\n source_urls = [r['source_url'] for r in recipes]\n source_urls = [r.source_url for r in Recipe.objects.filter(source_url__in=source_urls)]\n recipes = [r for r in recipes if r['source_url'] not in source_urls]\n\n return recipes", "def test_search_recipes_by_nutrients(self):\n pass", "def check_recipes(self):\n\n # This isn't perfect, unfortunately, but correctness trumps algorithmic\n # perfection. (For now.)\n crafting = self.crafting_table\n for recipe in retrieve_plugins(IRecipe).itervalues():\n dims = recipe.dimensions\n\n for x, y in crafting.iterkeys():\n if (x + dims[1] > self.crafting_stride or\n y + dims[0] > self.crafting_stride):\n continue\n\n indices = product(xrange(x, x + dims[1]),\n xrange(y, y + dims[0]))\n\n matches_needed = dims[0] * dims[1]\n\n for index, slot in zip(indices, recipe.recipe):\n\n if crafting[index] is None and slot is None:\n matches_needed -= 1\n elif crafting[index] is not None and slot is not None:\n cprimary, csecondary, ccount = crafting[index]\n skey, scount = slot\n if ((cprimary, csecondary) == skey\n and ccount >= scount):\n matches_needed -= 1\n\n if matches_needed == 0:\n # Jackpot!\n self.recipe = recipe\n self.recipe_offset = (x, y)\n return\n\n self.recipe = None", "def test_search_recipes_by_ingredients(self):\n pass", "def nearest_n_recipes(f, a, s, b, n=3):\n rlist = RECIPE_LIST\n # maps each recipe to (recipe, distance)\n dist = list(map(lambda x: (x,\n (f - x.f) ** 2 + (a - x.a) ** 2\n + (s - x.s) ** 2 + (b - x.b) ** 2), rlist))\n dist.sort(key=lambda x: x[1])\n n = min(n, len(dist))\n return dist[:n]", "def top_similar(self, n):\n return text_with_recipes", "def get_recommendations(soup_recipe):\n recommendations = soup_recipe.find(\"h2\", {\"class\": \"description\"})\n if not recommendations:\n return None\n return recommendations.get_text()", "def apply_recipe(self):\n\n crafting = self.crafting_table\n offset = self.recipe_offset\n dims = self.recipe.dimensions\n indices = product(xrange(offset[0], offset[0] + dims[1]),\n xrange(offset[1], offset[1] + dims[0]))\n count = []\n\n for index, slot in zip(indices, self.recipe.recipe):\n if slot is not None and crafting[index] is not None:\n scount = slot[1]\n tcount = crafting[index][2]\n count.append(tcount // scount)\n\n counted = min(count)\n if counted > 0:\n return self.recipe.provides[0], self.recipe.provides[1] * counted", "def selectRecipe(results):\n if len(results) > 10:\n range_max = 10\n else:\n range_max = len(results)\n for index in range(0, range_max):\n print '{0} : {1}'.format(index+1, results[index])\n select = int(raw_input(\"Select a recipe: \"))\n return results[select-1]", "def test_search_recipes(self):\n pass", "def find_better_question(self) -> str:\n ...", "def check_recipes(self):\n\n self.recipe = None\n\n for recipe in all_recipes:\n if recipe.matches(self.crafting, self.crafting_stride):\n self.recipe = recipe", "async def get_recipes_from_components(\n fridge_components: dict, db_path: Path = DB_PATH\n) -> list:\n available_components = set(fridge_components.keys())\n logger.debug(\"Available components: {}\".format(available_components))\n\n # Updated counters of users' components\n for component in available_components:\n await execute_query(\n \"UPDATE components SET total_encountered = 1 + \"\n \"(SELECT total_encountered FROM components WHERE component = ?) \"\n \"WHERE component = ?\",\n (component, component),\n db_path=db_path,\n )\n logger.debug(\"Updated component counters of: {}\".format(available_components))\n\n recipes = await get_query_results(\n \"SELECT recipe_name, components FROM recipes\", db_path=db_path\n )\n\n # Select recipes that are possible to prepare with users' components\n selected_recipes = []\n for recipe in recipes:\n recipe_components = json.loads(recipe[1])\n recipe_components_names = set([x[\"item\"] for x in recipe_components])\n logger.debug(\n \"Recipe '{}' contains '{}'\".format(recipe[0], recipe_components_names)\n )\n\n # If user has all components of the recipe, find minimum amount that can be prepared\n minimum_quantity = 0\n if recipe_components_names.issubset(available_components):\n logger.debug(\n \"Recipe '{}' can be cooked with available components.\".format(recipe[0])\n )\n\n for components in recipe_components:\n available_quantity = fridge_components[components[\"item\"]]\n needed_quantity = components[\"q\"]\n\n if minimum_quantity:\n minimum_quantity = min(\n minimum_quantity, available_quantity / needed_quantity\n )\n else:\n # First cycle\n minimum_quantity = available_quantity / needed_quantity\n\n selected_recipes.append({\"name\": recipe[0], \"quantity\": minimum_quantity})\n\n selected_recipes_names = [x[\"name\"] for x in selected_recipes]\n\n # Update last recommended time for recipes\n for recipe_name in selected_recipes_names:\n current_time = int(time())\n\n await execute_query(\n \"UPDATE recipes SET last_recommended = ? WHERE recipe_name = ?\",\n (current_time, recipe_name),\n db_path=db_path,\n )\n logger.debug(\"Updated last recommended times of: {}\".format(selected_recipes_names))\n\n return selected_recipes", "def recipe_present(self, search_string):\n response = requests.get(\n f\"https://api.github.com/repos/bioconda/bioconda-recipes/contents/recipes/{search_string}\",\n timeout=MULLED_SOCKET_TIMEOUT,\n )\n check_github_api_response_rate_limit(response)\n return response.status_code == 200" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }