query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (lengths 19 to 20)
metadata: dict
Get BERT embeddings from a dataloader generator.
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: bool = False):
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    with torch.set_grad_enabled(False):
        embeddings = {'ids': [],
                      'embeddings': [],
                      'labels': []}

        # get BERT training embeddings
        if metadata:
            for local_ids, local_data, local_meta, local_labels in data_generator:
                local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
                                                       local_meta, \
                                                       local_labels.to(device).long()
                augmented_embeddings = embedding_model(local_data, local_meta)

                embeddings['ids'].extend(np.array(local_ids))
                embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
                embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
        else:
            for local_ids, local_data, local_labels in data_generator:
                local_data, local_labels = local_data.to(device).long().squeeze(1), \
                                           local_labels.to(device).long()
                augmented_embeddings = embedding_model(local_data)

                embeddings['ids'].extend(np.array(local_ids))
                embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
                embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))

    return embeddings
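A minimal sketch of how the helper above might be driven; the DummyEmbedder module, the tensor shapes, and the batch size are invented for illustration, and any model that maps token-id tensors to embeddings would fit the same interface.

    import torch
    import numpy as np
    from torch.utils.data import DataLoader, TensorDataset

    # Hypothetical stand-in for a fine-tuned BERT wrapper.
    class DummyEmbedder(torch.nn.Module):
        def forward(self, token_ids):
            # one embedding value per example, just to exercise the plumbing
            return token_ids.float().mean(dim=1, keepdim=True)

    ids = torch.arange(8)                       # example identifiers
    tokens = torch.randint(0, 100, (8, 1, 16))  # (batch, 1, seq_len) so squeeze(1) applies
    labels = torch.randint(0, 2, (8,))
    loader = DataLoader(TensorDataset(ids, tokens, labels), batch_size=4)

    emb = _get_bert_embeddings(loader, DummyEmbedder(), metadata=False)
    print(len(emb['ids']), np.asarray(emb['embeddings']).shape)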
[ "def extract_torch_models_embeddings(dataloader, model, cuda, embedding_size=512):\n # model.eval()\n # embeddings = np.zeros((len(dataloader.dataset), embedding_size))\n #\n # one_embedding = torch.zeros(batch_size, embedding_size, 1, 1)\n #\n # def copy_data(m, i, o):\n # one_embedding.copy_(o.data)\n #\n # layer = model._modules.get('avgpool')\n # h = layer.register_forward_hook(copy_data)\n #\n # meshcodes = []\n # k = 0\n # for images, _, meshcode in tqdm(dataloader):\n # if cuda:\n # images = images.cuda()\n # _ = model(images)\n # embeddings[k:k + images.shape[0]] = one_embedding.numpy()[:, :, 0, 0] # batchsize x 512 x 1 x 1\n # k += images.shape[0]\n # meshcodes += list(meshcode)\n #\n # h.remove()\n # return embeddings, meshcodes\n\n model.eval()\n # 1D embedding, dataset_size by embedding_size\n embeddings = np.zeros((len(dataloader.dataset), embedding_size))\n labels = []\n k = 0\n for images, _, label in tqdm(dataloader):\n if cuda:\n images = images.cuda()\n embeddings[k:k + images.shape[0]] = model.get_embedding(images).data.cpu().numpy()\n k += images.shape[0]\n labels += list(label)\n\n return embeddings, labels", "def get_embeddings(self, data):\n raise NotImplementedError()", "def generate_embeddings(args):\n Path(args.temp_dir).mkdir(parents=True, exist_ok=True)\n Path(args.ptag_emb).mkdir(parents=True, exist_ok=True)\n Path(args.examples_dir).mkdir(parents=True, exist_ok=True)\n\n # With AutoTokenizer, huggingface uses faster rust implementation of the tokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.bert_model)\n model = AutoModel.from_pretrained(args.bert_model)\n\n # create embeddings for POS-tags\n save_ptag_embeddings(args, tokenizer, model)\n\n dataset = datasets.load_dataset('gen_embeddings.py', data_files={'train': args.train, 'test': args.test})\n # create input batches for training\n input_batches = create_input_batches(dataset, batch_size=args.batch_size, split='train')\n logging.info(f\"Number of input batches : {len(input_batches)}\")\n\n n_batch = 0\n\n # generate embeddings\n for batched_wordLists, batched_ptags in input_batches:\n # offset_map returns (char_start, char_end) for each token. 
(0,0) for special tokens\n # https://stackoverflow.com/questions/66666525/how-to-map-token-indices-from-the-squad-data-to-tokens-from-bert-tokenizer\n encoded_input_with_offset_mapping = tokenizer(batched_wordLists, is_split_into_words=True, padding=True, return_tensors='pt', return_offsets_mapping=True)\n encoded_input = deepcopy(encoded_input_with_offset_mapping)\n encoded_input.pop('offset_mapping')\n\n batched_word2token_position = [] # for every wordlist in the batch, stores [(word1, pos_tag1, token_start_pos, token_end_pos),..]\n tokens_embed = defaultdict(list)\n\n for i, wordlist in enumerate(batched_wordLists):\n word2token_position = []\n k = 1 # skip 0th position that contains special token [CLS]\n for j, word in enumerate(wordlist):\n tup = encoded_input_with_offset_mapping['offset_mapping'][i][k]\n start_pos = k\n tup_len = tup[1] - tup[0]\n if len(word) == tup_len:\n end_pos = k\n else: # iterate over the following tuples\n while len(word) != tup_len:\n k += 1\n tup = encoded_input_with_offset_mapping['offset_mapping'][i][k]\n tup_len += tup[1] - tup[0]\n end_pos = k\n word2token_position.append((word, batched_ptags[i][j], start_pos, end_pos + 1)) # (word, pos_tag, token_start_pos, token_end_pos)\n k += 1\n batched_word2token_position.append(word2token_position)\n\n embeddings = model(**encoded_input)[0].squeeze()\n\n # Create dict where keys are POS tags, and values are lists of embeddings\n # of all words that we encountered in dataset\n # Note for one POS tag we can have many embeddings for the same words\n # That can be good if these embeddings are different due to different contexts\n # But we can restrict that to include only one embeddings per word per POS tag\n for ind, word2token_position in enumerate(batched_word2token_position):\n for tup in word2token_position:\n pos_tag = tup[1]\n embed = torch.mean(embeddings[ind][tup[2]:tup[3]], dim=0)\n tokens_embed[pos_tag].append(embed)\n #print(pos_tag)\n\n with open(f\"{args.temp_dir}/batch_{n_batch}_embeds.pkl\", \"wb\") as f:\n pkl.dump(tokens_embed, f)\n\n n_batch += 1\n if n_batch % 100 == 0:\n logging.info(f\"{n_batch} batches processed...\")", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def get_BERT_iter(self, iter_):\n for batch in iter_:\n batch = self.prepare_BERT_batch(batch)\n yield batch", "def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)", "def get_embeddings(model, g1, g2=None):\n embeddings = []\n handle = model.node_embedder.register_forward_hook(lambda module, inp, outp : embeddings.append(outp))\n if g2 is None :\n model(g1)\n else:\n model(g1,g2)\n handle.remove()\n return embeddings", "def fixture_data_loader(global_vars):\n data = np.random.rand(global_vars[\"num_data\"],\n global_vars[\"n_features\"])\n data_loader = []\n indices_batch_split = np.arange(len(data))[global_vars[\"batch_size\"]::\n global_vars[\"batch_size\"]]\n batch_indices = np.array_split(np.arange(len(data)), indices_batch_split)\n for indices_samples in batch_indices:\n data_chunk = data[indices_samples]\n data_loader.append([data_chunk, indices_samples])\n\n return data_loader, data", "def extract_embeddings(ds, config):\n from lidbox.models.keras_utils 
import KerasWrapper\n\n extractors = [(KerasWrapper.from_config_as_embedding_extractor_fn(e), _get_device_or_default(e))\n for e in config[\"extractors\"]]\n # ConcreteFunctions will be pretty-formatted starting from TF 2.3\n # https://www.tensorflow.org/guide/concrete_function#changes_for_tensorflow_23\n logger.info(\"Using %d extractors:\\n %s\",\n len(extractors),\n '\\n '.join(\"on device '{:s}':\\n {}\".format(d, _left_pad_lines(str(e), 2)) for e, d in extractors))\n\n def _append_embeddings(x):\n embeddings = []\n for extractor_fn, device in extractors:\n with tf.device(device):\n embeddings.append(extractor_fn(x[\"input\"]))\n return dict(x, embedding=tf.concat(embeddings, axis=1))\n\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching inputs with batch size %s, extracting embeddings in batches.\", batch_size.numpy())\n ds = (ds.batch(batch_size)\n .prefetch(TF_AUTOTUNE)\n .map(_append_embeddings, num_parallel_calls=TF_AUTOTUNE))\n\n if not config.get(\"no_unbatch\", False):\n logger.info(\"Unbatching after embedding extraction\")\n ds = ds.unbatch()\n\n return ds", "def get_dataloaders(self):\n raise NotImplementedError('You must provide the dataloaders for the datasets.')", "def generate_embeddings(self, sound, batch_size=256):\n if not self.model_loaded:\n self.load_pre_trained_model()\n\n input_len = sound.shape[0]\n raw_embeddings = np.array([], dtype=np.int16).reshape(0, 128)\n for batch_index in range(0, input_len, batch_size):\n a_batch = sound[batch_index:batch_index + batch_size]\n # examples_batch = vggish_input.wavfile_to_examples(wav_file)\n [embedding_batch] = self.session_embedding.\\\n run([self.embedding_tensor],\n feed_dict={self.features_tensor: a_batch})\n raw_embeddings = np.concatenate((raw_embeddings, embedding_batch))\n # TODO post-processing can be batched as well\n post_processed_embeddings = self.pproc.postprocess(raw_embeddings)\n\n return raw_embeddings, post_processed_embeddings", "def get_embeddings(self):\n return self.model.input_embeddings.weight.cpu().data.numpy()", "def adversarial_dataloaders(self, batch_size=32, shuffle_b=False):\n train_loader = DataLoader(self.dataset_b, batch_size=batch_size, shuffle=shuffle_b, collate_fn=collate)\n valid_loader = DataLoader(self.dataset_c, batch_size=batch_size, collate_fn=collate)\n return train_loader, valid_loader", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def get_embedding(data):\n disable_eager_execution() # to avoid 
EagerTensor RuntimeError\n embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")\n response = dict()\n with tf.compat.v1.Session() as session:\n session.run([tf.compat.v1.global_variables_initializer(), tf.compat.v1.tables_initializer()])\n for key,value in data.items():\n key_embedding = embed([key]) #initial sentence USE embedding\n vector1 = session.run(key_embedding)\n tmp = []\n for candidate in value:\n candidate_embedding = embed([candidate]) #candidate USE embedding\n vector2 = session.run(candidate_embedding)\n \n a=np.reshape(vector1,(1,-1))\n b=np.reshape(vector2,(1,-1))\n cos_lib = cosine_similarity(a,b) #cosine similarity\n if cos_lib > 0.5:\n tmp.append(candidate)\n response[key]=tmp\n\n return response", "def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def get_embeddings(self, entities, type='entity'):\n return None", "def get_influencers_emb(self, artist_batch):\n influencers_batch = []\n max_len = 0\n for artist in artist_batch:\n # Load emb for that artist\n emb_path = os.path.join(WIKIART_ARTIST_INFLUENCERS_EMBEDDINGS_PATH, artist, 'embedding.pkl')\n emb = pickle.load(open(emb_path, 'r')) # numpy (num_means, emb_size)\n\n # Update max length\n if emb.shape[0] > max_len:\n max_len = emb.shape[0]\n\n emb_len_artist = [emb, emb.shape[0], artist]\n influencers_batch.append(emb_len_artist)\n\n # Sort by sequence length in descending order\n influencers_batch = sorted(influencers_batch, key=lambda x: -x[1])\n lengths = [length for emb, length, artist in influencers_batch]\n sorted_artists = [artist for emb, length, artist in influencers_batch]\n\n # Now that we have the max length, create a matrix that of size (max_seq_len, batch, emb_size)\n batch_size = len(artist_batch)\n emb_size = influencers_batch[0][0].shape[1]\n input = np.zeros((max_len, batch_size, emb_size))\n for i, (emb, _, _) in enumerate(influencers_batch):\n padded = np.zeros((max_len, emb_size))\n padded[:len(emb), :] = emb\n input[:, i, :] = padded\n\n # Convert to Variable\n input = Variable(torch.Tensor(input))\n\n # Create packed sequence\n packed_sequence = nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=False)\n\n return packed_sequence, sorted_artists" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Justified (name, value, units, doc) strings for active parameters.
def param_strs(self):
    name_len  = max(len(p.name) for p in self)
    value_len = max(len(p.value_str) for p in self.params.values())
    units_len = max(len(p.units) for p in self.params.values())
    return [(p.name.ljust(name_len),
             p.value_str.ljust(value_len),
             p.units.ljust(units_len),
             p.__doc__)
            for p in self.params.values() if p]
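A quick sketch of how this method could be exercised; the Param and ParamSet classes below are hypothetical stand-ins invented for the example, the only requirement being an iterable container that exposes a params dict of objects with name, value_str, units, and a docstring.

    class Param:
        def __init__(self, name, value_str, units, doc):
            self.name, self.value_str, self.units = name, value_str, units
            self.__doc__ = doc          # param_strs reads the per-object docstring

    class ParamSet:
        def __init__(self, params):
            self.params = {p.name: p for p in params}
        def __iter__(self):             # active parameters iterated as `for p in self`
            return iter(self.params.values())

    ps = ParamSet([Param("rate", "1.5", "Hz", "sampling rate"),
                   Param("gain", "20", "dB", "amplifier gain")])

    for name, value, units, doc in param_strs(ps):  # call the function above directly
        print(name, value, units, doc)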
[ "def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]))\n\n # Build documentation for parameters\n for (on_param, param) in enumerate(self._parameters):\n if on_param > 0:\n docstring += '\\n'\n\n docstring += ' ' + param[0].ljust(maxlen + 2)\n doc = wrap(param[1], columns - maxlen - 4)\n padding = str('')\n for line in doc.split('\\n'):\n docstring += padding + line + '\\n'\n padding = str('').ljust(maxlen + 4)\n \n # Pull off the final '\\n'\n return docstring[0:len(docstring)-1]", "def _FormatParam(self, param: Dict[str, str], setting: str) -> cp.Labeled:\n return cp.Labeled([\n cp.Lines([\n '{} [{}]'.format(param['name'], setting),\n cp.Lines([\n param['description'],\n ' '\n ])\n ])\n ])", "def _parameter_summary(self, parameters, parameters_to_show=4):\n params = parameters\n if len(parameters) > parameters_to_show:\n params = parameters[:2] + [\"...\"] + parameters[-2:]\n return \", \".join(params)", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def _format_params(params: Dict[str, str]) -> str:\n return \"\".join([f\"{key:>25}: {value:<25}\\n\" for key, value in params.items()])", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def _plain_format_params(params: Dict[str, str]) -> str:\n return \"\".join([f\"{key}: {value}\\n\" for key, value in params.items()])", "def hyperparams_str(self):\n s = 'Hyperparameters'\n if self.hyperparams:\n for k, v in self.hyperparams.items():\n s += '\\n- {0}: \\t{1}'.format(k, v)\n else:\n s += ' - N/A'\n return s", "def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. 
Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))", "def _write_params(self, size):\n msg = []\n if self.params:\n msg = ['$PARAMS\\n']\n for (key, param) in sorted(self.params.iteritems()):\n msg.append(param.print_card(size))\n return ''.join(msg)", "def __str__(self):\n return 'Tensor product {}: {} params, wires {}'.format([i.name for i in self.obs], len(self.params), self.wires)", "def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))", "def __str__ (self):\n if len(self.attrs) > 0:\n return '{ %s }' % ', '.join(': '.join(item) for item in self.attrs.items())\n else:\n return ''", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def show_param(self):\n\n print '__ell_pot = '#, self.__ell_gravlens\n print '__b_sis = '#, self.__b_sis\n print '__z_lens = ', self.__z_lens", "def summary_parameters(self):\n text = re.sub(r'\\r?\\n', ' - ', self.opt.text[:200])\n return {'adding': text}", "def markdown_param_table(self, properties, required, columns):\n out = \"\"\n out += \"\".join([f\"| {column.title()} \" for column in columns])\n out += \"|\\n\"\n out += \"\".join([\"|-----------\" for _ in columns])\n out += \"|\\n\"\n for p_key, param in properties.items():\n for column in columns:\n if column == \"parameter\":\n out += f\"| `{p_key}` \"\n elif column == \"description\":\n desc = param.get(\"description\", \"\").replace(\"\\n\", \"<br>\")\n out += f\"| {desc} \"\n if param.get(\"help_text\", \"\") != \"\":\n help_txt = param[\"help_text\"].replace(\"\\n\", \"<br>\")\n out += f\"<details><summary>Help</summary><small>{help_txt}</small></details>\"\n elif column == \"type\":\n out += f\"| `{param.get('type', '')}` \"\n elif column == \"required\":\n out += f\"| {p_key in required or ''} \"\n else:\n out += f\"| {param.get(column, '')} \"\n out += \"|\\n\"\n return out", "def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)", "def print_attr(self):\n return \"name : {0}\\nprice : {1}\\ndescription : {2}\".format(\n self.name, self.price, self.description\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Base hash on description string, just like equality operator.
def __hash__(self):
    return hash(self.description)
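The docstring's point is that hashing stays consistent with the equality operator; a minimal hypothetical class showing the same pairing:

    class Described:
        # Example only: equality and hashing both use the description string.
        def __init__(self, description):
            self.description = description
        def __eq__(self, other):
            return isinstance(other, Described) and self.description == other.description
        def __hash__(self):
            return hash(self.description)

    a, b = Described("low-pass filter"), Described("low-pass filter")
    assert a == b and hash(a) == hash(b)  # equal objects hash equally
    print(len({a, b}))                    # 1: they collapse to one set entry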
[ "def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))", "def __hash__(self):\n return hash(self.text)", "def hash(self, string):\n return self.__scaffydb.hash(string)", "def __hash__(self):\n return hash(str(self))", "def hash_string(self):\n return self._hash_string", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def __hash__(self):\n return hash((self.title, self.isbn))", "def __hash__(self):\n return hash(self.serialize(\"glycoct\"))", "def __hash__(self):\n return hash((self._start, self._end, self._name, self._value))", "def state_hash(self, s):\n pass", "def volume_label_hash( s ):\n h = hashlib.md5( s )\n h = h.digest( )\n assert len( h ) == 16\n hi, lo = struct.unpack( '!QQ', h )\n h = hi ^ lo\n h = struct.pack( '!Q', h )\n assert len( h ) == 8\n h = base64.urlsafe_b64encode( h )\n assert h[-1] == '='\n return h[:-1]", "def _hash(self, string):\r\n return sum([(ord(char) * self.x ** i) for i, char in enumerate(string)]) % self.p % self.m", "def hash(self, string):\n return (sum((ord(i) for i in str(string)))) % self.numBuckets", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def hash(astring, tablesize):", "def __hash__(self) -> int:\n return hash(self._hashable_content())", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert 2D alignment parameters (alpha, sx, sy, mirror) into 3D alignment parameters (phi, theta, psi, s2x, s2y, mirror)
def params_2D_3D(alpha, sx, sy, mirror):
    phi = 0
    psi = 0
    theta = 0
    alphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)
    if mirror > 0:
        phi   = (540.0 + phi) % 360.0
        theta = 180.0 - theta
        psi   = (540.0 - psi + alphan) % 360.0
    else:
        psi = (psi + alphan) % 360.0
    return phi, theta, psi, s2x, s2y
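To make the mirror branch concrete, the angle bookkeeping reduces to plain modular arithmetic; the alphan value below is an assumed output of compose_transform2, not a computed one.

    # Assume compose_transform2 returned alphan = 30.0 for some (alpha, sx, sy).
    alphan = 30.0
    phi, theta, psi = 0.0, 0.0, 0.0

    # mirror > 0 branch of params_2D_3D
    phi   = (540.0 + phi) % 360.0           # 180.0
    theta = 180.0 - theta                   # 180.0
    psi   = (540.0 - psi + alphan) % 360.0  # 210.0
    print(phi, theta, psi)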
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def align_3d_to_2d(self, pose_2d, pose_3d, camera, rootIdx):\n R = camera['R']\n s = np.mean(camera['s'])\n t = np.reshape(camera['t'], (2, 1))\n translation = np.dot(inv(R), np.vstack((t / s, s)))\n aligned3d = s * np.dot(R, (pose_3d + translation.T).T).T\n return aligned3d - np.array([0, 0, aligned3d[rootIdx, 2]])", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # 
Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def translate_3D_coordinates_along_axes(x, y, z, shift_x, shift_y, shift_z):\n x_shifted = x + shift_x\n y_shifted = y + shift_y\n z_shifted = z + shift_z\n return x_shifted, y_shifted, z_shifted", "def align(fixed_shape, mov_shape):\n\n #### centralizing shapes\n centralized_fixed = centralize(fixed_shape)\n centralized_mov = centralize(mov_shape)\n\n #### extracting x and y coordinates\n x_fixed = centralized_fixed[:int(len(centralized_fixed)/2)] # was mov_shape\n y_fixed = centralized_fixed[int(len(centralized_fixed)/2):] # was mov_shape\n\n x_mov = centralized_mov[:int(len(centralized_mov)/2)]\n y_mov = centralized_mov[int(len(centralized_mov)/2):]\n\n ### computing transformation parameters\n mov_norm = np.linalg.norm(centralized_mov)\n\n a = np.dot(np.squeeze(centralized_fixed), np.squeeze(centralized_mov))/np.square(mov_norm)\n\n b = np.sum(x_mov * y_fixed - x_fixed * y_mov) / np.square(mov_norm)\n\n s = np.sqrt(np.square(a) + np.square(b))\n\n theta = np.arctan(b/a)\n\n ### translation computation\n x_fixed = fixed_shape[:int(len(fixed_shape)/2)]\n y_fixed = fixed_shape[int(len(fixed_shape)/2):]\n\n x_mov = mov_shape[:int(len(mov_shape)/2)]\n y_mov = mov_shape[int(len(mov_shape)/2):]\n\n tx = np.mean(x_fixed - x_mov)\n ty = np.mean(y_fixed - y_mov)\n\n aligned_shape = transform(mov_shape, scale=s, theta=theta, translate_x=tx, translate_y=ty)\n param = dict()\n param[\"scale\"] = s\n param[\"theta\"] = theta\n param[\"translate_x\"] = tx\n param[\"translate_y\"] = ty\n return aligned_shape, param", "def get_affine_matrix3d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angles: Tensor,\n sxy: Tensor | None = None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n ang_deg, d = vs_params # degrees, nm\n ang_rad = np.deg2rad(ang_deg) # retrieve radians\n d = d * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij\n traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (\n comb_ijk / 
mda.lib.mdamath.norm(comb_ijk))", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def transformFromRotationPosition3D(*args):\n return _almathswig.transformFromRotationPosition3D(*args)", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def get_affine_params(self):\n params = {'scales': [], 'shifts': []}\n for transform in reversed(self.transforms):\n if '_scale' in dir(transform):\n params['scales'].append(transform._scale)\n params['shifts'].append(transform._shift)\n return params" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert 3D alignment parameters (phi, theta, psi, s2x, s2y; there is no mirror in 3D) into 2D alignment parameters (alpha, sx, sy, mirror)
def params_3D_2D(phi, theta, psi, s2x, s2y):
    if theta > 90.0:
        mirror = 1
        alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0)
    else:
        mirror = 0
        alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0)
    return alpha, sx, sy, mirror
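A hedged round-trip check pairing this function with params_2D_3D from the earlier record; it assumes an EMAN2/SPARX environment that provides compose_transform2, and only verifies that converting 2D -> 3D -> 2D reproduces the inputs (the printed differences should be near zero).

    # Round-trip sketch: requires the EMAN2/SPARX environment for compose_transform2.
    alpha, sx, sy, mirror = 25.0, 1.5, -2.0, 0

    phi, theta, psi, s2x, s2y = params_2D_3D(alpha, sx, sy, mirror)
    alpha2, sx2, sy2, mirror2 = params_3D_2D(phi, theta, psi, s2x, s2y)

    print((alpha - alpha2) % 360.0, sx - sx2, sy - sy2, mirror == mirror2)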
[ "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def align_3d_to_2d(self, pose_2d, pose_3d, camera, rootIdx):\n R = camera['R']\n s = np.mean(camera['s'])\n t = np.reshape(camera['t'], (2, 1))\n translation = np.dot(inv(R), np.vstack((t / s, s)))\n aligned3d = s * np.dot(R, (pose_3d + translation.T).T).T\n return aligned3d - np.array([0, 0, aligned3d[rootIdx, 2]])", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * 
generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def align(fixed_shape, mov_shape):\n\n #### centralizing shapes\n centralized_fixed = centralize(fixed_shape)\n centralized_mov = centralize(mov_shape)\n\n #### extracting x and y coordinates\n x_fixed = centralized_fixed[:int(len(centralized_fixed)/2)] # was mov_shape\n y_fixed = centralized_fixed[int(len(centralized_fixed)/2):] # was mov_shape\n\n x_mov = centralized_mov[:int(len(centralized_mov)/2)]\n y_mov = centralized_mov[int(len(centralized_mov)/2):]\n\n ### computing transformation parameters\n mov_norm = np.linalg.norm(centralized_mov)\n\n a = np.dot(np.squeeze(centralized_fixed), np.squeeze(centralized_mov))/np.square(mov_norm)\n\n b = np.sum(x_mov * y_fixed - x_fixed * y_mov) / np.square(mov_norm)\n\n s = np.sqrt(np.square(a) + np.square(b))\n\n theta = np.arctan(b/a)\n\n ### translation computation\n x_fixed = fixed_shape[:int(len(fixed_shape)/2)]\n y_fixed = fixed_shape[int(len(fixed_shape)/2):]\n\n x_mov = mov_shape[:int(len(mov_shape)/2)]\n y_mov = mov_shape[int(len(mov_shape)/2):]\n\n tx = np.mean(x_fixed - x_mov)\n ty = np.mean(y_fixed - y_mov)\n\n aligned_shape = transform(mov_shape, scale=s, theta=theta, translate_x=tx, translate_y=ty)\n param = dict()\n param[\"scale\"] = s\n param[\"theta\"] = theta\n param[\"translate_x\"] = tx\n param[\"translate_y\"] = ty\n return aligned_shape, param", "def gen_position(kps,dim,rot,meta,const):\n b=kps.size(0)\n c=kps.size(1)\n calib=meta['calib']\n off_set = (calib[:, 0, 3]) / calib[:, 0, 0] # [B, 1]\n\n #opinv = opinv.unsqueeze(1)\n #opinv = opinv.expand(b, c, -1, -1).contiguous().view(-1, 2, 3).float()\n kps = kps.view(b, c, -1, 2).permute(0, 1, 3, 2)\n #hom = torch.ones(b, c, 1, 9).cuda()\n #kps = torch.cat((kps, hom), dim=2).view(-1, 3, 9)\n #kps = torch.bmm(opinv, kps).view(b, c, 2, 9)\n kps = kps.permute(0, 1, 3, 2).contiguous().view(b, c, -1) # 16.32,18\n si = torch.zeros_like(kps[:, :, 0:1]) + calib[:, 0:1, 0:1]\n\n alpha_idx = rot[:, :, 1] > rot[:, :, 5]\n alpha_idx = alpha_idx.float()\n alpha1 = torch.atan(rot[:, :, 2] / rot[:, :, 3]) + (-0.5 * np.pi)\n alpha2 = torch.atan(rot[:, :, 6] / rot[:, :, 7]) + (0.5 * np.pi)\n alpna_pre = alpha1 * alpha_idx + alpha2 * (1 - alpha_idx)\n alpna_pre = alpna_pre.unsqueeze(2)\n\n # alpna_pre=rot_gt\n\n rot_y = alpna_pre + torch.atan2(kps[:, :, 16:17] - calib[:, 0:1, 2:3], si)\n rot_y[rot_y > np.pi] = rot_y[rot_y > np.pi] - 2 * np.pi\n rot_y[rot_y < - np.pi] = rot_y[rot_y < - np.pi] + 2 * np.pi\n\n calib = calib.unsqueeze(1)\n calib = calib.expand(b, c, -1, -1).contiguous()\n kpoint = kps[:, :, :16]\n f = calib[:, :, 0, 0].unsqueeze(2)\n f = f.expand_as(kpoint)\n cx, cy = calib[:, :, 0, 2].unsqueeze(2), calib[:, :, 1, 2].unsqueeze(2)\n cxy = torch.cat((cx, cy), dim=2)\n cxy = cxy.repeat(1, 1, 8) # b,c,16\n kp_norm = (kpoint - cxy) / f\n\n l = dim[:, :, 2:3]\n h = dim[:, :, 1:2]\n w = dim[:, :, 0:1]\n cosori = torch.cos(rot_y)\n sinori = torch.sin(rot_y)\n\n B = torch.zeros_like(kpoint)\n C = torch.zeros_like(kpoint)\n\n kp = 
kp_norm.unsqueeze(3) # b,c,16,1\n const = const.expand(b, c, -1, -1)\n A = torch.cat([const, kp], dim=3)\n\n ## The order of the point has been changed, so should the matrixes\n # B[:, :, 0:1] = l * 0.5 * cosori + w * 0.5 * sinori\n # B[:, :, 1:2] = h * 0.5\n # B[:, :, 2:3] = l * 0.5 * cosori - w * 0.5 * sinori\n # B[:, :, 3:4] = h * 0.5\n # B[:, :, 4:5] = -l * 0.5 * cosori - w * 0.5 * sinori\n # B[:, :, 5:6] = h * 0.5\n # B[:, :, 6:7] = -l * 0.5 * cosori + w * 0.5 * sinori\n # B[:, :, 7:8] = h * 0.5\n # B[:, :, 8:9] = l * 0.5 * cosori + w * 0.5 * sinori\n # B[:, :, 9:10] = -h * 0.5\n # B[:, :, 10:11] = l * 0.5 * cosori - w * 0.5 * sinori\n # B[:, :, 11:12] = -h * 0.5\n # B[:, :, 12:13] = -l * 0.5 * cosori - w * 0.5 * sinori\n # B[:, :, 13:14] = -h * 0.5\n # B[:, :, 14:15] = -l * 0.5 * cosori + w * 0.5 * sinori\n # B[:, :, 15:16] = -h * 0.5\n\n B[:, :, 0:1] = - l * 0.5 * cosori - w * 0.5 * sinori\n B[:, :, 1:2] = - h * 0.5\n B[:, :, 2:3] = - l * 0.5 * cosori + w * 0.5 * sinori\n B[:, :, 3:4] = - h * 0.5\n B[:, :, 4:5] = - l * 0.5 * cosori + w * 0.5 * sinori\n B[:, :, 5:6] = h * 0.5\n B[:, :, 6:7] = l * 0.5 * cosori + w * 0.5 * sinori\n B[:, :, 7:8] = h * 0.5\n B[:, :, 8:9] = l * 0.5 * cosori + w * 0.5 * sinori\n B[:, :, 9:10] = - h * 0.5\n B[:, :, 10:11] = l * 0.5 * cosori - w * 0.5 * sinori\n B[:, :, 11:12] = - h * 0.5\n B[:, :, 12:13] = l * 0.5 * cosori - w * 0.5 * sinori\n B[:, :, 13:14] = h * 0.5\n B[:, :, 14:15] = - l * 0.5 * cosori - w * 0.5 * sinori\n B[:, :, 15:16] = h * 0.5\n\n C[:, :, 0:1] = l * 0.5 * sinori - w * 0.5 * cosori # - l * 0.5 * cosori - w * 0.5 * sinori\n C[:, :, 1:2] = l * 0.5 * sinori - w * 0.5 * cosori\n C[:, :, 2:3] = l * 0.5 * sinori + w * 0.5 * cosori # - l * 0.5 * cosori + w * 0.5 * sinori\n C[:, :, 3:4] = l * 0.5 * sinori + w * 0.5 * cosori\n C[:, :, 4:5] = l * 0.5 * sinori + w * 0.5 * cosori # - l * 0.5 * cosori + w * 0.5 * sinori\n C[:, :, 5:6] = l * 0.5 * sinori + w * 0.5 * cosori\n C[:, :, 6:7] = - l * 0.5 * sinori + w * 0.5 * cosori # l * 0.5 * cosori + w * 0.5 * sinori\n C[:, :, 7:8] = - l * 0.5 * sinori + w * 0.5 * cosori\n C[:, :, 8:9] = - l * 0.5 * sinori + w * 0.5 * cosori # l * 0.5 * cosori + w * 0.5 * sinori\n C[:, :, 9:10] = - l * 0.5 * sinori + w * 0.5 * cosori\n C[:, :, 10:11] = - l * 0.5 * sinori - w * 0.5 * cosori # l * 0.5 * cosori - w * 0.5 * sinori\n C[:, :, 11:12] = - l * 0.5 * sinori - w * 0.5 * cosori\n C[:, :, 12:13] = - l * 0.5 * sinori - w * 0.5 * cosori # l * 0.5 * cosori - w * 0.5 * sinori\n C[:, :, 13:14] = - l * 0.5 * sinori - w * 0.5 * cosori\n C[:, :, 14:15] = l * 0.5 * sinori - w * 0.5 * cosori # - l * 0.5 * cosori - w * 0.5 * sinori\n C[:, :, 15:16] = l * 0.5 * sinori - w * 0.5 * cosori\n\n B = B - kp_norm * C\n\n # A=A*kps_mask1\n A = A.double() # For Numerical Stability. We add this line after repeated debugging.\n AT = A.permute(0, 1, 3, 2)\n AT = AT.view(b * c, 3, 16)\n A = A.view(b * c, 16, 3)\n B = B.view(b * c, 16, 1).float()\n # mask = mask.unsqueeze(2)\n\n pinv = torch.bmm(AT, A)\n pinv = torch.inverse(pinv + torch.randn_like(pinv) * 1e-8) # b*c 3 3\n pinv = torch.bmm(pinv, AT).float() # Change back to Float\n pinv = torch.bmm(pinv, B)\n pinv = pinv.view(b, c, 3, 1).squeeze(3)\n\n #pinv[:, :, 1] = pinv[:, :, 1] + dim[:, :, 0] / 2 ## No need to transfer to bottom point. 
We always use the center point unless in writing to KITTI\n pinv[:, :, 0] -= off_set.unsqueeze(1)\n \n return pinv,rot_y,alpna_pre, kps", "def translate_3D_coordinates_along_axes(x, y, z, shift_x, shift_y, shift_z):\n x_shifted = x + shift_x\n y_shifted = y + shift_y\n z_shifted = z + shift_z\n return x_shifted, y_shifted, z_shifted", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def sphere_to_plane_car(az0, el0, az, el):\n return az - az0, el - el0", "def transformFromRotationPosition3D(*args):\n return _almathswig.transformFromRotationPosition3D(*args)", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = 
self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Commented by Zhengfan Yang on 05/01/07: I made some changes to the original amoeba so that it can now pass out some values calculated by func other than the criteria. This is important in multilevel amoeba refinement because otherwise upper-level refinement would lose the information from lower-level refinement.
def amoeba_multi_level(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
    #print " ENTER AMOEBA MULTI LEVEL"
    nvar = len(var)        # number of variables in the minimization
    nsimplex = nvar + 1    # number of vertices in the simplex

    # first set up the simplex
    simplex = [0]*(nvar+1)  # set the initial simplex
    simplex[0] = var[:]
    for i in xrange(nvar):
        simplex[i+1] = var[:]
        simplex[i+1][i] += scale[i]

    fvalue = []
    for i in xrange(nsimplex):  # set the function values for the simplex
        result, passout = func(simplex[i], data=data)
        #print " amoeba setting ",i,simplex[i],result, passout
        fvalue.append([result, passout])

    # Ooze the simplex to the maximum
    iteration = 0
    while 1:
        # find the index of the best and worst vertices in the simplex
        ssworst = 0
        ssbest  = 0
        for i in xrange(nsimplex):
            if fvalue[i][0] > fvalue[ssbest][0]:
                ssbest = i
            if fvalue[i][0] < fvalue[ssworst][0]:
                ssworst = i

        # get the average of the nsimplex-1 best vertices in the simplex
        pavg = [0.0]*nvar
        for i in xrange(nsimplex):
            if i != ssworst:
                for j in range(nvar):
                    pavg[j] += simplex[i][j]
        for j in xrange(nvar):
            pavg[j] = pavg[j]/nvar  # nvar is nsimplex-1

        simscale = 0.0
        for i in range(nvar):
            simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
        simscale = simscale/nvar

        # find the range of the function values
        fscale = (abs(fvalue[ssbest][0])+abs(fvalue[ssworst][0]))/2.0
        if fscale != 0.0:
            frange = abs(fvalue[ssbest][0]-fvalue[ssworst][0])/fscale
        else:
            frange = 0.0  # all the fvalues are zero in this case

        # have we converged?
        if (((ftolerance <= 0.0 or frange < ftolerance) and    # converged to maximum
             (xtolerance <= 0.0 or simscale < xtolerance)) or  # simplex contracted enough
            (itmax and iteration >= itmax)):                   # ran out of iterations
            return simplex[ssbest], fvalue[ssbest][0], iteration, fvalue[ssbest][1]

        # reflect the worst vertex
        pnew = [0.0]*nvar
        for i in xrange(nvar):
            pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
        fnew = func(pnew, data=data)
        if fnew[0] <= fvalue[ssworst][0]:
            # the new vertex is worse than the worst so shrink the simplex.
            for i in xrange(nsimplex):
                if i != ssbest and i != ssworst:
                    for j in xrange(nvar):
                        simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
                    fvalue[i] = func(simplex[i], data=data)
            for j in xrange(nvar):
                pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
            fnew = func(pnew, data=data)
        elif fnew[0] >= fvalue[ssbest][0]:
            # the new vertex is better than the best so expand the simplex.
            pnew2 = [0.0]*nvar
            for i in xrange(nvar):
                pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
            fnew2 = func(pnew2, data=data)
            if fnew2[0] > fnew[0]:
                # accept the new vertex in the simplex
                pnew = pnew2
                fnew = fnew2

        # replace the worst vertex with the new vertex
        for i in xrange(nvar):
            simplex[ssworst][i] = pnew[i]
        fvalue[ssworst] = fnew
        iteration += 1
        #print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest]
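A small usage sketch; the quadratic toy objective and its passout payload are made up purely for illustration. Since the routine above uses xrange it targets a Python 2 interpreter (substitute range for Python 3 first). The routine maximizes, so the simplex should settle near (1.0, 2.0).

    def toy_func(p, data=None):
        # Negative paraboloid peaked at (1.0, 2.0); the second return value is the
        # extra "passout" information this variant of amoeba carries along.
        value = -((p[0] - 1.0)**2 + (p[1] - 2.0)**2)
        return value, {"residual": -value}

    best, fbest, niter, passout = amoeba_multi_level([0.0, 0.0], [0.5, 0.5], toy_func)
    print(best, fbest, niter, passout)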
[ "def evaluate(self, aSolution):", "def refine(self):\n\t\t\n # initialise\n self.poor_value = []\n self.poor_nrefl = []\n\n \t\t# create lists of parameters, global and for each grain\n self.globals = [\"a\",\"b\",\"c\",\"alpha\",\"beta\",\"gamma\",\"wx\",\"wy\"]\n for i in range(self.inp.fit['no_det']):\n self.globals.append(\"tx%s\" %i)\n self.globals.append(\"ty%s\" %i)\n self.globals.append(\"tz%s\" %i)\n self.globals.append(\"py%s\" %i)\n self.globals.append(\"pz%s\" %i)\n self.globals.append(\"cy%s\" %i)\n self.globals.append(\"cz%s\" %i)\n self.globals.append(\"L%s\" %i)\n\n self.grains = []\n for i in range(self.inp.no_grains):\n self.grains.append([\"x%s\" %i,\"y%s\" %i,\"z%s\" %i,\"rodx%s\" %i,\"rody%s\" %i,\"rodz%s\" %i,\n \"epsaa%s\" %i,\"epsbb%s\" %i,\"epscc%s\" %i,\"epsbc%s\" %i,\"epsac%s\" %i,\"epsab%s\" %i])\n\n # correct for z-offset\n xcom = 0\n ycom = 0\n zcom = 0\n vol = 0\n for i in range(self.inp.no_grains):\n if i+1 not in self.inp.fit['skip']:\n vol = vol + reject.median(self.inp.volume[i])\n xcom = xcom + self.inp.values['x%s' %i]*reject.median(self.inp.volume[i])\n ycom = ycom + self.inp.values['y%s' %i]*reject.median(self.inp.volume[i])\n zcom = zcom + self.inp.values['z%s' %i]*reject.median(self.inp.volume[i])\n xcom = xcom / vol\n ycom = ycom / vol\n zcom = zcom / vol\n \n for i in range(self.inp.no_grains):\n# self.inp.values['x%s' %i] = self.inp.values['x%s' %i] - xcom\n# self.inp.values['y%s' %i] = self.inp.values['y%s' %i] - ycom\n self.inp.values['z%s' %i] = self.inp.values['z%s' %i] - zcom\n \n for i in range(self.inp.fit['no_det']):\n self.inp.values['cz%s' %i] = self.inp.values['cz%s' %i] + zcom/self.inp.values['pz%s' %i]\n\n #refinement update\n reload(fcn)\n self.m = Minuit(fcn.FCN)\n self.m.values = self.inp.values\n self.m.errors = self.inp.errors\n for entries in self.m.fixed:\n self.m.fixed[entries] = True\n for entries in self.globals:\n self.m.fixed[entries] = True\n print entries, self.inp.values[entries]\n\n\t\t# determine whether to refine\n self.ref = False\n if 'globals' in self.inp.fit['goon']:\n self.ref = True\n \n\n\t\t# carry out refinement\n if self.ref == True:\n self.mg = Minuit(fcn.FCNgrain)\n self.mg.values = self.m.values\n self.mg.errors = self.m.errors\n self.mg.fixed = self.m.fixed\n\n print '\\n\\n*****Now fitting %s*****' %self.inp.fit['goon']\n print 'newreject_grain', self.inp.fit['newreject_grain']\n # calculate starting values\n g = fit_multidet.grain_values(self)\n self.fval = sum(g)\n print '\\n%s starting value %e' %(self.inp.fit['goon'],self.fval)\n t1 = time.clock()\n self.fitglobals()\n print 'Fit %s tolerance %e' %(self.inp.fit['goon'],self.m.tol)\n self.m.errors = self.inp.errors\n self.m.migrad()\n fit_multidet.scale_errors(self)\n self.inp.values = self.m.values\n self.inp.errors = self.m.errors\n write_output_multidet.write_global(self)\n\t\t\t\t\n self.time = time.clock()-t1\n print 'Fit %s time %i s' %(self.inp.fit['goon'],self.time)\n print 'Fit %s value %e \\n' %(self.inp.fit['goon'],self.m.fval)\n\t\t\t \n # apply crystal_system restraints to unit cell parameters\n if 'hex' in self.inp.fit['crystal_system'] or 'trigonal' in self.inp.fit['crystal_system'] or 'tetra' in self.inp.fit['crystal_system'] :\n self.m.values['b'] = self.m.values['a'] \n elif 'cubic' in self.inp.fit['crystal_system'] or 'isotropic' in self.inp.fit['crystal_system']:\n self.m.values['b'] = self.m.values['a']\n self.m.values['c'] = self.m.values['a']\n\t\t\t#constrain pixels to be square\n if self.inp.fit['pixel'] == 1:\n for k in 
range(self.inp.fit['no_det']):\n self.m.values['pz%s' %k] = self.m.values['py%s' %k]\n \n # reject outliers and save cycle info\t\n fit_multidet.reject_outliers(self)\n write_output_multidet.write_rej(self.inp,message=self.inp.fit['goon'])\n write_output_multidet.write_log(self)\n write_output_multidet.write_par(self)\n\n\t\t# move onto next refinement given by the reforder list\t\n write_output_multidet.write_values(self)\n self.inp.fit['goon'] = self.inp.fit['reforder'][self.inp.fit['reforder'].index(self.inp.fit['goon'])+1]\n\n return", "def _strategy_B(mass_function_mat):", "def amoeba(transform, parameters_tolerance=0.1, function_tolerance=0.0001, max_iterations=300, scales=None, initial_simplex_size=None):\n\n #\n # Iteration Observer\n #\n #def iterationUpdate():\n # print optimizer.GetInitialSimplexDelta()\n # print transform.GetParameters()\n \n optimizer = itk.AmoebaOptimizer.New()\n optimizer.MinimizeOn()\n # Medimax <-> Numerical Recipes in C\n # recalage/mtch_3d.c:get_facteur_precision\n # NORMAL : 1\n # PRECIS : 0.1\n # TRES_PRECIS : 0.01\n # PRECISION_MAX : 0.0001\n optimizer.SetParametersConvergenceTolerance(parameters_tolerance) # 1/10th pixel\n optimizer.SetFunctionConvergenceTolerance(function_tolerance) # 0.001 bits\n optimizer.SetMaximumNumberOfIterations(max_iterations)\n \n if initial_simplex_size is not None :\n optimizer.AutomaticInitialSimplexOff()\n delta = transform.GetNumberOfParameters()*(initial_simplex_size,) # the initial size of the simplex (initial_simplex_size units in each of the parameters)\n print delta\n optimizer.SetInitialSimplexDelta(delta)\n else :\n optimizer.AutomaticInitialSimplexOn()\n\n if scales is not None :\n optimizer.SetScales(scales)\n\n #iterationCommand = itk.PyCommand.New()\n #iterationCommand.SetCommandCallable( iterationUpdate )\n #optimizer.AddObserver( itk.IterationEvent(), iterationCommand.GetPointer() )\n \n return optimizer", "def classic_var(self,tau21=0.864,tau31=0.864,Verbose=False):\n\n self.algo = 'dozier'\n\n# Compute radiances\n# -----------------\n L21 = B21(self.T21) \n L31 = B31(self.T31) \n E21 = B21(self.Tb21)\n E31 = B31(self.Tb31)\n N = L21.size\n\n if isscalar(tau21): tau21 = tau21 * ones(L21.shape)\n if isscalar(tau31): tau31 = tau31 * ones(L31.shape)\n \n# Use a variational approach - Needs vectorization\n# ------------------------------------------------\n sig21 = 1.\n sig31 = 1.\n x0 = [600.,P_SCALE*0.1] # [Tf,p]; p here is normalized; 10 ha/ 1 km2 = 0.1\n Tf = - ones(N)\n p = - ones(N)\n niter = 200\n for i in range(N):\n rvals = fmin(Jfunc2d, x0, ftol=0.001, maxiter=niter, disp=0, full_output=1, \\\n args=(L21[i],E21[i],tau21[i],sig21,L31[i],E31[i],tau31[i],sig31))\n x = rvals[0]\n iter = rvals[2]\n if iter < niter:\n Tf[i] = x[0]\n p[i] = 100. * x[1] / P_SCALE # units is %\n\n# Quality control\n# ---------------\n m = isnan(Tf) == False\n m = m & (Tf<1800.)\n m = m & (p>0) & (p<=100)\n\n# Add solution as attributes\n# --------------------------\n self.m = m\n self.Tf = Tf\n self.p = p\n\n# Replace fire size with median size for those fires that did not converge\n# ------------------------------------------------------------------------\n I = (m == False)\n self.p[I] = median(self.p[m])\n\n self.farea = (self.p/100.) * self.pixar # km2\n self.hflux = 0.001 * self.pow / self.farea # kW/m2\n\n# Print out results\n# -----------------\n y = 100. 
* ( Tf[m].size ) / N + 0.05\n if Verbose:\n print_stats('__header__','Classic Dozier - Variational Results (Yield: %4.1f%%)'%y)\n print_stats('Tf (K)',Tf[m])\n print_stats('p (%)',p[m])\n print_stats('A (km2)',self.farea[m])\n print_stats('HF (kW/m2)',self.hflux[m])\n print_stats('__footer__')", "def solve(self):", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def wrapped_objective_function(scaled_parameters, observation, forcing, path_to_shapefile, path_to_dem, path_to_nlcd,start_year, years_warming_up, end_year):\n parameters = transform(scaled_parameters)\n if parameters[9] < parameters[12]: #if Imax,forest < Imax,grass\n parameters[12] = parameters[9] - 0.001\n if parameters[9] < parameters[15]: #if Imax,forest < Imax,rip\n parameters[15] = parameters[9] - 0.001 \n if parameters[10] < parameters[13]: #if Su,max,forest < if Su,max,grass\n parameters[13] = parameters[10] - 0.001\n if parameters[13] < parameters[16]: #if Su,max,grass < Su,max,rip\n parameters[16] = parameters[13] - 0.001 \n if parameters[13] < parameters[7]: #if Su,max,grass < Su,max,bare\n parameters[7] = parameters[13] - 0.001 \n if parameters[18] < parameters[4]: #if Kf,rip < Kf\n parameters[4] = parameters[18] - 0.001 \n # print(parameters)\n Qmodelled, precipitation = run_model_cma(parameters, forcing, path_to_shapefile, path_to_dem, path_to_nlcd, end_year)\n precipitation.index, Qmodelled.index, observation.index = pd.to_datetime(precipitation.index), pd.to_datetime(Qmodelled.index), pd.to_datetime(observation.index)\n mask = (observation.index >= Qmodelled.index[0]) & (observation.index <= Qmodelled.index[-1])\n observation = observation.loc[mask]\n\n ED = multi_objective(Qmodelled.loc[Qmodelled.index.year >= (start_year+years_warming_up)], observation.loc[observation.index.year >= (start_year+years_warming_up)], precipitation)[0] \n return ED", "def objective_wrapper(args, X_train, y_train):\n def objective(trial):\n #--------------------------------------------\n # ベイズ最適化でのチューニングパイパーパラメーター\n 
#--------------------------------------------\n params = {\n 'booster': trial.suggest_categorical('booster', ['gbtree']),\n 'objective': trial.suggest_categorical('objective', ['binary:logistic']),\n \"learning_rate\" : trial.suggest_loguniform(\"learning_rate\", 0.01, 0.01), # ハイパーパラメーターのチューニング時は固定 \n \"n_estimators\" : trial.suggest_int(\"n_estimators\", 1000, 1000), # ハイパーパラメーターのチューニング時は固定\n 'max_depth': trial.suggest_int(\"max_depth\", 3, 9), # 3 ~ 9 : 一様分布に従う。1刻み\n 'min_child_weight': trial.suggest_loguniform('min_child_weight', 0.1, 10.0), # 0.1 ~ 10.0 : 対数が一様分布に従う\n 'subsample': trial.suggest_discrete_uniform('subsample', 0.6, 0.95, 0.05), # 0.6 ~ 0.95 : 一様分布に従う。0.05 刻み\n 'colsample_bytree': trial.suggest_discrete_uniform('colsample_bytree', 0.6, 0.95, 0.05), # 0.6 ~ 0.95 : 一様分布に従う。0.05 刻み\n 'gamma': trial.suggest_loguniform(\"gamma\", 1e-8, 1.0), # 1e-8 ~ 1.0 : 対数が一様分布に従う\n 'alpha': trial.suggest_float(\"alpha\", 0.0, 0.0), # デフォルト値としておく。余裕があれば変更\n 'reg_lambda': trial.suggest_float(\"reg_lambda\", 1.0, 1.0), # デフォルト値としておく。余裕があれば変更\n 'random_state': trial.suggest_int(\"random_state\", 71, 71),\n }\n\n #--------------------------------------------\n # stratified k-fold CV での評価\n #--------------------------------------------\n # k-hold cross validation で、学習用データセットを学習用と検証用に分割したもので評価\n kf = StratifiedKFold(n_splits=args.n_splits_gs, shuffle=True, random_state=args.seed)\n for fold_id, (train_index, valid_index) in enumerate(kf.split(X_train, y_train)):\n #--------------------\n # データセットの分割\n #--------------------\n X_train_fold, X_valid_fold = X_train.iloc[train_index], X_train.iloc[valid_index]\n y_train_fold, y_valid_fold = y_train.iloc[train_index], y_train.iloc[valid_index]\n\n #--------------------\n # モデルの定義\n #--------------------\n model = xgb.XGBClassifier(\n booster = params['booster'],\n objective = params['objective'],\n learning_rate = params['learning_rate'],\n n_estimators = params['n_estimators'],\n max_depth = params['max_depth'],\n min_child_weight = params['min_child_weight'],\n subsample = params['subsample'],\n colsample_bytree = params['colsample_bytree'],\n gamma = params['gamma'],\n alpha = params['alpha'],\n reg_lambda = params['reg_lambda'],\n random_state = params['random_state'] \n )\n\n #--------------------\n # モデルの学習処理\n #--------------------\n model.fit(X_train_fold, y_train_fold)\n\n #--------------------\n # モデルの推論処理\n #--------------------\n y_pred_train[valid_index] = model.predict(X_valid_fold)\n \n accuracy = (y_train == y_pred_train).sum()/len(y_pred_train)\n return accuracy\n\n return objective", "def alphabeta_search(state, game):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n state = game\r\n d = 20 #this is the cutoff test depth value. 
if we exceed this value, stop\r\n cutoff_test=None\r\n eval_fn = None \r\n player = game.to_move(state)\r\n \r\n def max_value(state, alpha, beta, depth):\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n for a in game.actions(state):\r\n print \"max call:\"\r\n print state.moves\r\n print game.moves\r\n print a\r\n v = max(v, min_value(game.result(state, a),\r\n alpha, beta, depth+1))\r\n if v >= beta:\r\n return v\r\n alpha = max(alpha, v)\r\n return v\r\n\r\n def min_value(state, alpha, beta, depth):\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n for a in game.actions(state):\r\n v = min(v, max_value(game.result(state, a),\r\n alpha, beta, depth+1))\r\n if v <= alpha:\r\n return v\r\n beta = min(beta, v)\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or game.terminal_test(state)))\r\n eval_fn = eval_fn or (lambda state: game.utility(state, player))\r\n #by default, utility score is used\r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n return argmax(game.actions(state),\r\n lambda a: min_value(game.result(state, a),\r\n -infinity, infinity, 0))", "def evalueAlphaBeta(position,prof,i,alpha,beta):\n if prof==0 or positionTerminale(position): #si la position est terminale, ou le parcours de l'arbre est fini\n return {'coup':1,'valeur':evaluation(position)}\n else:\n bestCoup=0\n j=position['taille']\n if position['trait']=='NORD':\n #position est MAX\n i=1\n while(i<=j and alpha<beta): #tant qu'on ne peut pas élaguer\n if(coupAutorise(position,i)):\n p=clonePosition(position)\n p=joueCoup(position,i)\n e=evalueAlphaBeta(p,prof-1,i,alpha,beta)\n if(alpha<e['valeur']): #on stocke l'alpha min\n bestCoup=i\n alpha=e['valeur'] \n i+=1\n return {'coup':bestCoup,'valeur':alpha}\n else:\n #position est MIN\n i=1\n while(i<=j and alpha<beta): #tant qu'on ne peut pas élaguer\n if(coupAutorise(position,i)):\n p=clonePosition(position)\n p=joueCoup(position,i)\n e=evalueAlphaBeta(p,prof-1,i,alpha,beta)\n if(beta>e['valeur']): #on stocke le beta max\n bestCoup=i\n beta=e['valeur']\n i+=1\n return {'coup':bestCoup,'valeur':beta}", "def prepare_exact_solution(self):", "def custom_gelu(x):", "def _analyze_opt_criterias_(criterias, sign_threshold, max_num_cofactors, file_prefix, with_qq_plots,\n lm, step_info_list, quantiles_dict, plot_bonferroni=True, cand_genes=None,\n plot_xaxis=True, log_qq_max_val=5, eig_L=None, type='emmax', highlight_loci=None,\n write_pvals=False, snp_priors=None, ppa_threshold=0.5, emma_num=None,\n save_pvals=False , **kwargs):\n ret_dict = {}\n opt_indices = {}\n opt_file_dict = {}\n for c in criterias:\n print 'GWAs for optimal %s criteria:' % c\n if c == 'bonf':\n opt_list = sp.arange(max_num_cofactors + 1)\n for i, pval in enumerate(criterias['bonf']):\n if pval > sign_threshold:\n opt_list[i] = -1\n i_opt = opt_list.argmax()\n elif c == 'mbonf':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n pval = criterias[c][i]\n if pval > sign_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n pval = criterias[c][i + shift]\n if pval > sign_threshold:\n 
bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n elif c == 'min_cof_ppa':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n ppa = criterias[c][i]\n if ppa < ppa_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n ppa = criterias[c][i + shift]\n if ppa < ppa_threshold:\n bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n\n else:\n cur_min_val = criterias[c][0]\n min_indices = [0]\n for i in range(1, len(criterias[c])):\n v = criterias[c][i]\n if v < cur_min_val:\n cur_min_val = v\n min_indices = [i]\n if v == cur_min_val:\n min_indices.append(i)\n i_opt = min(min_indices)\n # i_opt = sp.argmin(criterias[c])\n print \" %d'th step was optimal.\" % i_opt\n ret_dict[c] = i_opt\n if i_opt <= max_num_cofactors:\n # Copy the pngs...\n if file_prefix:\n png_file_name = '%s_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = '%s_ppa_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if with_qq_plots:\n qq_png_file_name = '%s_step%d_qqplot.png' % (file_prefix, i_opt)\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = '%s_step%d_log_qqplot.png' % (file_prefix, i_opt)\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, opt_log_qq_png_file_name)\n elif i_opt in opt_file_dict:\n if file_prefix:\n png_file_name = opt_file_dict[i_opt]['manhattan']\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = opt_file_dict[i_opt]['ppa_manhattan']\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n\n if with_qq_plots:\n qq_png_file_name = opt_file_dict[i_opt]['qq']\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = opt_file_dict[i_opt]['log_qq']\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, 
opt_log_qq_png_file_name)\n\n elif not i_opt in opt_indices:\n # Perfom GWAS witht the optimal cofactors\n cofactor_snps = step_info_list[i_opt]['cofactor_snps']\n cofactors = step_info_list[i_opt]['cofactors']\n print cofactors\n lm.set_factors(cofactor_snps)\n if type == 'emmax':\n eig_R = lm._get_eigen_R_(X=lm.X)\n reml_res = lm.get_REML(eig_L=eig_L, eig_R=eig_R)\n H_sqrt_inv = reml_res['H_sqrt_inv']\n l_res = lm._emmax_f_test_(kwargs['snps'], H_sqrt_inv, snp_priors=snp_priors,\n emma_num=emma_num)\n min_pval_i = l_res['ps'].argmin()\n mahalnobis_rss = l_res['rss'][min_pval_i]\n print 'Min Mahalanobis RSS:', mahalnobis_rss\n elif type == 'lm':\n l_res = lm.fast_f_test(kwargs['snps'])\n min_pval_i = l_res['ps'].argmin()\n\n min_pval = l_res['ps'][min_pval_i]\n\n min_pval_chr_pos = (kwargs['chromosomes'][min_pval_i], kwargs['positions'][min_pval_i])\n print 'Min p-value:', min_pval\n l_pvals = l_res['ps'].tolist()\n l_perc_var_expl = l_res['var_perc'].tolist()\n opt_indices[i_opt] = {'min_pval':min_pval, 'min_pval_chr_pos':min_pval_chr_pos,\n 'kolmogorov_smirnov':agr.calc_ks_stats(l_pvals),\n 'pval_median':agr.calc_median(l_pvals)}\n if file_prefix:\n opt_file_prefix = '%s_opt_%s' % (file_prefix, c)\n if snp_priors:\n ppa_cofactors = step_info_list[i_opt]['ppa_cofactors']\n ppas = l_res['ppas'].tolist()\n else:\n ppas = None\n ppa_cofactors = None\n opt_file_dict[i_opt], res = _plot_manhattan_and_qq_(opt_file_prefix, i_opt, l_pvals, quantiles_dict,\n plot_bonferroni=True, highlight_markers=cofactors,\n cand_genes=cand_genes, plot_xaxis=plot_xaxis,\n log_qq_max_val=log_qq_max_val, with_qq_plots=with_qq_plots,\n simple_qq=True, highlight_loci=highlight_loci,\n write_pvals=write_pvals, highlight_ppa_markers=ppa_cofactors,\n ppas=ppas, perc_var_expl=l_perc_var_expl, save_pvals=save_pvals,\n **kwargs)\n if save_pvals:\n opt_indices['res'] = opt_file_dict[i_opt]['res']\n\n if type == 'emmax':\n opt_indices[i_opt]['mahalanobis_rss'] = mahalnobis_rss\n opt_indices[i_opt]['res'] = res\n return ret_dict, opt_indices", "def focus_to_grating(x,y,z,xv,yv,zv,ga=ga,gb=gb,grating_r0=grating_r0,grating_z0=grating_z0,toroid_c=toroid_c,grating_y0=grating_y0,**kwargs):\n\n x = float64(x)\n y = float64(y)\n xv = float64(xv)\n yv = float64(yv)\n zv = float64(zv)\n toroid_c = float64(toroid_c)\n\n def fz(t):\n term1 = (toroid_c + toroid_pm2*sqrt( (x+xv*t)**2 + (z+zv*t-grating_z0+toroid_pm2*toroid_c)**2 ) )**2 \n term2 = (y+yv*t-grating_y0)**2 - grating_r0**2\n return float64(term1)+float64(term2)\n def afz(t):\n return abs(fz(t))\n #return (z+zv*t) - sqrt( grating_r0**2 + toroid_c**2 - (x+xv*t)**2 - (y+yv*t)**2 - 2*sqrt( grating_r0**2*toroid_c**2 - toroid_c**2 * (y+yv*t)**2 ))\n #t1 = fsolve(fz,1.0,warning=False,xtol=1e-35,maxfev=3000)\n t = bisect(fz,0,3,xtol=float64(1e-35),rtol=float64(1e-35),maxiter=3000)\n #t = findroot(fz,float64(t1),tol=float64(1e-18),solver='bisect',maxsteps=10)\n #t = mathutil.root(fz,(float64(0.5),float64(2.5)),accuracy=1e-35)\n #print \"rootfind: %30.25f, %30.25f, t-t1: %30.25f\" % (t1, t, t-t1)\n #term1 = (2*gb**2*x*xv + 2*ga**2*y*yv + 2*ga**2*gb**2*z*zv - 2*ga**2*gb**2*grating_z0*zv)\n #term2 = -1.0* sqrt((-2*gb**2*x*xv - 2*ga**2*y*yv - 2*ga**2*gb**2*z*zv + 2*ga**2*gb**2*grating_z0*zv)**2 - \n # 4*(ga**2*gb**2*grating_r0**2 - gb**2*x**2 - ga**2*y**2 - ga**2*gb**2*z**2 + \n # 2*ga**2*gb**2*z*grating_z0 - ga**2*gb**2*grating_z0**2)*(-gb**2*xv**2 - ga**2*yv**2 - \n # ga**2*gb**2*zv**2))\n #term3 = 1.0/(2*(-gb**2*xv**2 - ga**2*yv**2 - ga**2*gb**2*zv**2))\n #t = (term1+term2)*term3\n #t2 
= brenth(fz,0,3,xtol=float64(1e-35),rtol=float64(1e-35),maxiter=3000)\n #t3 = fmin(afz,1.5,xtol=float64(1e-35))\n #print \"fsolve: %30.25g bisect: %30.25g brenth: %30.25g \" % (t,t1,t2)\n #print \"f-bisect: %30.25g f-brent: %30.25g bisect-brent:%30.25g\" % (t-t1,t-t2,t1-t2)\n\n return t", "def alphabeta_search(state, d=1, cutoff_test=None, eval_fn=None, start_time=None, turn_number=None):\n global count\n global testing\n global BigInitialValue\n global MoveTimes\n\n player = state.to_move\n count = 0\n\n def max_value(state, alpha, beta, depth):\n global count, testing\n if testing:\n print(\" \"* depth, \"Max alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"* depth, \"Max cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = -BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"maxDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = max(v, max_value(s, alpha, beta, depth+1))\n else:\n v = max(v, min_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"* depth, \"max best value:\", v)\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n\n def min_value(state, alpha, beta, depth):\n global count\n if testing:\n print(\" \"*depth, \"Min alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"*depth, \"Min cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"minDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = min(v, min_value(s, alpha, beta, depth+1))\n else:\n v = min(v, max_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"*depth, \"min best value:\", v)\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n\n def right_value(s, alpha, beta, depth):\n if s.to_move.id == state.to_move.id:\n return max_value(s, -BigInitialValue, BigInitialValue, 0)\n else:\n return min_value(s, -BigInitialValue, BigInitialValue, 0)\n\n def argmin(seq, fn):\n \"\"\"Return an element with lowest fn(seq[i]) score; tie goes to first one.\n >>> argmin(['one', 'to', 'three'], len)\n 'to'\n \"\"\"\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best\n\n def argmax(seq, fn):\n \"\"\"Return an element with highest fn(seq[i]) score; tie goes to first one.\n >>> argmax(['one', 'to', 'three'], len)\n 'three'\n \"\"\"\n return argmin(seq, lambda x: -fn(x))\n\n # Body of alphabeta_search starts here:\n cutoff_test = (cutoff_test or\n (lambda state,depth: depth>d or state.game.terminal_test(state)))\n eval_fn = eval_fn or (lambda state: state.game.utility(state, turn_number))\n action, state = argmax(state.game.successors(state),\n lambda a_s: right_value(a_s[1], -BigInitialValue, BigInitialValue, 0))\n\n # calculate move time, round to 2 decimal places, store for analysis\n MoveTimes.append(round(time.time() - 
start_time, 2))\n return action", "def optimize_params_elastic(defCoords,elastModGuess,poissRatioGuess,pressGuess,r0,\n surfTenRef,hoopStrech,merStrech,deltaRho,nReload,\n trueRotation,nPoints):\n \n #splitting data at apex into left and right side\n xDataLeft,zDataLeft,xDataRight,zDataRight = split_data(defCoords[:,0],defCoords[:,1])\n \n # initial guesses to start rountine\n initGuess = [elastModGuess,poissRatioGuess,pressGuess]\n\n intRange,arcLength = get_data_arc_len(xDataLeft,zDataLeft,xDataRight,zDataRight,r0)\n \n # calling out optimization routine with reload\n\n r=optimize.minimize(objective_fun_v2_elastic,initGuess,args=(deltaRho,\n hoopStrech,merStrech,surfTenRef,xDataLeft,zDataLeft,\n xDataRight,zDataRight,intRange,trueRotation,r0),\n method='Nelder-Mead',tol=1e-9)\n\n initGuess = [r.x[0],r.x[1],r.x[2]]\n y2DFinal = r.x[0]\n v2DFinal = r.x[1] \n pressureFinal = r.x[2]\n\n intRangeFinal,arcLength = get_data_arc_len(xDataLeft,zDataLeft,xDataRight,zDataRight,r0)\n xModel,zModel,fiModel,tauMerModel = elastic_model(y2DFinal,v2DFinal,pressureFinal,nPoints,intRangeFinal,deltaRho,hoopStrech,merStrech,surfTenRef)\n \n \n\n return xModel,zModel,y2DFinal,v2DFinal,pressureFinal", "def bimodal_u(self,tau21=0.864,tau31=0.864,Verbose=False):\n\n self.algo = 'bimodal'\n\n# Setup Bayesian parameters\n# -------------------------\n self.bayes_setup(tau21=tau21,tau31=tau31)\n\n# Reserve space for output\n# ------------------------\n N = self.lon.size\n self.a_F = - 99.99 * ones(N)\n self.h_F = - 99.99 * ones(N)\n self.r_F = - 99.99 * ones(N)\n self.frp_F = - 99.99 * ones(N)\n self.m = zeros(N).astype('boolean')\n \n S21, S31, F21, F31 = (self.S21,self.S31,self.F21,self.F31)\n\n if Verbose:\n if N>100:\n Np = list(range(0,N,N/100))\n Np = list(range(N))\n elif N>10:\n Np = list(range(0,N,N/10))\n else:\n Np = list(range(N))\n print(\"\")\n print(\" Unconstrained Bimodal Dozier\")\n print(\" ----------------------------\")\n print(\"\")\n print(\" % | Lon Lat b | r_F h_F\")\n print(\" | deg deg | % kW/m2\") \n print(\"---- | ------ ------ - | -------- --------\")\n\n# Estimate parameters for each fire\n# ---------------------------------\n for n in range(N):\n\n L21, L31 = (self.L21[n], self.L31[n])\n E21, E31 = (self.E21[n], self.E31[n])\n tau21, tau31 = (self.tau21[n], self.tau31[n])\n pixar = self.pixar[n]\n pow = self.pow[n]\n\n# Estimate admissible solutions for (Ts,Tf) in range\n# --------------------------------------------------\n ps, pf, kappa = bayes_single(L21, E21, tau21, \n L31, E31, tau31,\n S21, S31, F21, F31 )\n\n# Parameters in phase space\n# -------------------------\n r_F = pf * F21 / ( pf * F21 + ps * S21 ) # non-dimensional\n a_F = (pf/100.) * pixar # km2\n h_F = 0.001 * r_F * pow / a_F # kW/m2\n pow_F = r_F * pow # MW\n \n# Kernel density estimates\n# ------------------------\n i = ((ps+pf)>=0) # quality control\n if any(i):\n self.m[n] = True\n self.a_F[n] = mle_kde(a_F[i]) * 1e6 # m2\n self.r_F[n] = mle_kde(r_F[i]) # %\n self.h_F[n] = mle_kde(h_F[i]) # kW/m2\n self.pow_F[n] = mle_kde(frp_F[i]) # MW\n\n if Verbose:\n if n in Np:\n ip = int(0.5+100.*n/N)\n print(\"%3d%% | %7.2f %6.2f | %8.2f %8.2f\"%\\\n (ip,self.lon[n],self.lat[n], \\\n self.r_F[n],self.h_F[n]))", "def prepare_iterative_solution(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit the histogram of the input image under the mask to that of the reference image.
def ce_fit(inp_image, ref_image, mask_image):
    hist_res = Util.histc(ref_image, inp_image, mask_image)
    args = hist_res["args"]
    scale = hist_res["scale"]
    data = [hist_res['data'], inp_image, hist_res["ref_freq_bin"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]
    res = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)
    resu = ["Final Parameter [A,B]:", res[0], "Final Chi-square :", -1*res[1], "Number of Iteration :", res[2]]
    corrected_image = inp_image*res[0][0] + res[0][1]
    result = [resu, "Corrected Image :", corrected_image]
    del data[:], args[:], scale[:]
    return result
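For orientation, a minimal self-contained sketch of the same idea follows: fit a linear intensity transform A*img + B by minimizing a chi-square distance between the masked input histogram and the reference histogram with a Nelder-Mead (amoeba-style) search. It uses NumPy/SciPy in place of the SPARX `Util.histc`/`amoeba`/`hist_func` helpers; the function name, binning, and objective are illustrative assumptions, not the library's actual implementation.

import numpy as np
from scipy.optimize import minimize

def ce_fit_sketch(inp_image, ref_image, mask_image, n_bins=256):
    # Masked pixel values of both images.
    m = mask_image > 0
    ref_vals = ref_image[m].ravel()
    inp_vals = inp_image[m].ravel()

    # Reference histogram on a fixed set of bin edges.
    ref_hist, edges = np.histogram(ref_vals, bins=n_bins)

    def chi_square(params):
        a, b = params
        # Histogram of the linearly transformed input on the same bins.
        test_hist, _ = np.histogram(a * inp_vals + b, bins=edges)
        return np.sum((test_hist - ref_hist) ** 2 / (ref_hist + 1.0))

    # Start from a moment-matching guess for A and B.
    a0 = ref_vals.std() / (inp_vals.std() + 1e-12)
    b0 = ref_vals.mean() - a0 * inp_vals.mean()
    res = minimize(chi_square, x0=[a0, b0], method="Nelder-Mead")

    a, b = res.x
    return a * inp_image + b, (a, b), res.fun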
[ "def calculate_2d_histogram(image, mask, color_base):\n print(\"HOL\")\n if color_base == 'HSV':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n range_hist = [0, 180, 0, 256]\n channels = [0, 1]\n elif color_base == 'LAB':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n range_hist = [0, 256, 0, 256]\n channels = [1, 2]\n elif color_base == 'YCrCb':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)\n channels = [1, 2]\n range_hist = [0, 256, 0, 256]\n else:\n raise Exception(\"Color Base Not Valid\")\n\n hist = cv2.calcHist([image], channels, mask, [16, 16], range_hist)\n cv2.normalize(hist, hist)\n\n return hist.flatten()", "def histogram(img):\n\n return np.histogram(img, bins=64, range=[0,255])[0]", "def hist(img):\n\treturn cv2.calcHist([img],[0],None,[256],[0,256])", "def _hist_match_image(im, targ, mask, inplace=True):\n # Copy?\n if not inplace:\n im = im.copy()\n\n # Add a small amount of random noise to break ties for sorting in next\n # step.\n im += 0.1 * np.random.rand(*im.shape)\n\n # Sort image pixels (we actually only need indices of sort)\n if mask is None:\n idcs = np.argsort(im.flat)\n else:\n idcs = np.argsort(im[mask].flat)\n\n # Replace image histogram with target histogram, using idcs to place\n # pixels at correct positions\n svim = np.empty(len(idcs))\n svim[idcs] = targ\n if mask is None:\n im[:] = svim.reshape(im.shape)\n else:\n im[mask] = svim\n\n # Return?\n if not inplace:\n return im", "def histogram_match(image, reference):\n return match_histograms(image, reference, multichannel=False)", "def calculate_3d_histogram(image, mask, color_base):\n\n if color_base == 'BGR':\n range_hist = [0, 256, 0, 256, 0, 256]\n else:\n raise Exception(\"Color Base is not valid\")\n\n hist = cv2.calcHist([image], [0, 1, 2], mask, [32, 32, 32], range_hist)\n cv2.normalize(hist, hist)\n\n return hist.flatten()", "def create_image_fits(base_dir,fits_img,outroot, bin_file, temp_file):\n bins, min_x, max_x, min_y, max_y = read_in(base_dir+'/'+bin_file,base_dir+'/'+temp_file)\n # Create image array\n x_len = int(max_x-min_x)\n y_len = int(max_y-min_y)\n temp_array = np.zeros((x_len,y_len))\n percentage_array = np.zeros((x_len,y_len))\n for bin in bins:\n for pixel in bin.pixels:\n #print(bin.temp)\n try:\n temp_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = int(bin.temp)\n percentage_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = float(bin.percentage)\n except:\n #print(bin.temp)\n pass\n # Copy header\n fits_ = fits.open(base_dir+'/'+fits_img)\n hdr = header=fits_[0].header\n # Change image\n hdu = fits.PrimaryHDU(temp_array)\n hdul = fits.HDUList([hdu])\n fits.writeto(base_dir+'/component_bins.fits', temp_array.T, hdr, overwrite=True)\n fits.writeto(base_dir+'/percentage_bins.fits', percentage_array.T, hdr, overwrite=True)", "def mask_from_fit(self, fit):\n if self.mask:\n return fit.mask\n else:\n return None", "def equalize_hist(input):\n return np.float32(skimage.exposure.equalize_hist(input.numpy()))", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF_SetInput(self, input)", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def histogram(source : Image, destination : Image = None, num_bins:int = 256, 
minimum_intensity : float = None, maximum_intensity : float = None, determine_min_max : bool = True) -> Image:\n image_to_process = source\n\n # workaround for 2D images; the 2D kernel doesn't work as\n # in Java (global_id starts a 1 instead of 0, only tested\n # on AMD Ryzen 4700U, Vega 7)\n # thus, we copy the 2D image in a 3D stack with one slice\n if (len(image_to_process.shape) == 2):\n temp = image_to_process\n image_to_process = create([1, temp.shape[0], temp.shape[1]])\n copy_slice(temp, image_to_process, 0)\n\n # print(\"image shape \" + str(image_to_process.shape))\n # print(str(pull(image_to_process)[0]))\n\n if minimum_intensity is None or maximum_intensity is None or determine_min_max:\n minimum_intensity = minimum_of_all_pixels(source)\n maximum_intensity = maximum_of_all_pixels(source)\n\n number_of_partial_histograms = source.shape[-2]\n\n # determine multiple histograms. one for each Y (row) in the image\n partial_histograms = create([number_of_partial_histograms, 1, num_bins])\n\n parameters = {\n \"src\":image_to_process,\n \"dst_histogram\":partial_histograms,\n \"minimum\": float(minimum_intensity),\n \"maximum\": float(maximum_intensity),\n \"step_size_x\": int(1),\n \"step_size_y\": int(1),\n \"step_size_z\": int(1)\n }\n\n constants = {\n \"NUMBER_OF_HISTOGRAM_BINS\":num_bins\n }\n\n global_sizes = [number_of_partial_histograms]\n execute(__file__,\n \"../clij-opencl-kernels/kernels/histogram_\" + str(len(image_to_process.shape)) + \"d_x.cl\",\n \"histogram_\" + str(len(image_to_process.shape)) + \"d\",\n global_sizes,\n parameters,\n constants=constants)\n\n # sum partial histograms\n if destination is None:\n destination = create([num_bins])\n\n sum_z_projection(partial_histograms, destination)\n\n return destination", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) #before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def Histogram_Matching(inImFile, outImFile, refImFile,\n number_of_histogram_levels=1024,\n number_of_match_points=7,\n threshold_at_mean_intensity=False):\n inputIm = sitk.ReadImage(inImFile)\n referenceIm = sitk.ReadImage(refImFile)\n histMatchingFilter = sitk.HistogramMatchingImageFilter()\n histMatchingFilter.SetNumberOfHistogramLevels(number_of_histogram_levels)\n histMatchingFilter.SetNumberOfMatchPoints(number_of_match_points)\n histMatchingFilter.SetThresholdAtMeanIntensity(threshold_at_mean_intensity)\n outputIm = histMatchingFilter.Execute(inputIm, referenceIm)\n if outImFile is not None:\n sitk.WriteImage(outputIm, outImFile, True)\n return outputIm", "def _histogram(ndarray H, flatiter iter):\n cdef double *h\n cdef unsigned int clamp\n\n # Views\n clamp = <unsigned int>H.shape[0]\n h = <double*>H.data\n\n # Compute image histogram \n histogram(h, clamp, iter)\n\n return", "def hist_stretch(ld_img):\n grayscale_img = cv2.cvtColor(ld_img, cv2.COLOR_BGR2GRAY)\n double_img = 
im2double(grayscale_img)\n arr_min = np.amin(double_img)\n arr_max = np.amax(double_img)\n out_img = stretch(double_img, arr_min, arr_max)\n return double2im(out_img)", "def final_mask(path, output_mask, percentage=0.5):\n with fits.open(path, \"readonly\") as temp_mask:\n mask_data = temp_mask[0].data\n mask_header = temp_mask[0].header\n mask_data[mask_data >= percentage] = 1\n mask_data[mask_data < percentage] = 0\n fits.writeto(output_mask, mask_data, mask_header, clobber=True)", "def fit_image(image, mask=None):\n\n if mask is None:\n mask = np.ones_like(image)\n\n # Get dimensions\n y_dim, x_dim = image.shape\n\n # Flatten the mask to eliminate masked off indices\n mask_flat = np.array(mask.flatten(), dtype=bool)\n\n # The linalg function needs flattened arrays of coordinates and values\n x, y = np.linspace(0, x_dim, x_dim), np.linspace(0, y_dim, y_dim)\n X, Y = np.meshgrid(x, y, copy=False)\n X, Y = X.flatten(), Y.flatten()\n\n # Set up arrays for linalg function\n A = np.array([X*0+1, X, Y, X**2, X**2*Y, X**2*Y**2, Y**2, X*Y**2, X*Y]).T\n B = image.flatten()\n\n coeff, r, rank, s = np.linalg.lstsq(A[mask_flat], B[mask_flat], rcond=None)\n\n image_fit = np.sum(coeff * A, axis=1).reshape(image.shape)\n image_fit_masked = image_fit * mask\n\n #return (np.sum(coeff * A, axis=1) * mask.flatten()).reshape(image.shape)\n\n return image_fit, image_fit_masked", "def fullhistogram(img):\n maxt = img.max()\n if maxt == 0:\n return np.array([img.size])\n return nhistogram(img, np.arange(maxt+2))[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the position of the common line in 3D. The formula is (RB^T zhat) cross (RA^T zhat). Returns phi, theta of the common line in degrees; theta is always < 90. Notice you don't need to enter psi's; they are irrelevant.
def common_line_in3D(phiA, thetaA, phiB, thetaB):
    from math import pi, sqrt, cos, sin, asin, atan2

    piOver = pi/180.0
    ph1 = phiA*piOver
    th1 = thetaA*piOver
    ph2 = phiB*piOver
    th2 = thetaB*piOver

    #nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR)
    #ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR)
    #nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR)
    nx = sin(th1)*cos(ph1)*sin(ph2) - sin(th2)*sin(ph1)*cos(ph2)
    ny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2) - cos(th1)*sin(th2)*cos(ph1)*cos(ph2)
    nz = cos(th2)*sin(ph1)*cos(ph2) - cos(th1)*cos(ph1)*sin(ph2)

    norm = nx*nx + ny*ny + nz*nz
    if norm < 1e-5:
        #print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB
        return 0.0, 0.0

    if nz < 0:
        nx = -nx
        ny = -ny
        nz = -nz

    #thetaCom = asin(nz/sqrt(norm))
    phiCom = asin(nz/sqrt(norm))
    #phiCom = atan2(ny,nx)
    thetaCom = atan2(ny, nx)

    return phiCom*180.0/pi, thetaCom*180.0/pi
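The recipe in the description — cross the two projection directions R_B^T zhat and R_A^T zhat and read off spherical angles — can be sketched independently with plain NumPy. Treating (phi, theta) as the azimuth and polar angle of each projection direction is an assumption about the Euler convention, so this is a geometric illustration, not a replacement for the function above.

import numpy as np

def projection_direction(phi_deg, theta_deg):
    # Unit vector with azimuth phi and polar angle theta (assumed meaning of R^T zhat).
    phi, theta = np.radians([phi_deg, theta_deg])
    return np.array([np.sin(theta) * np.cos(phi),
                     np.sin(theta) * np.sin(phi),
                     np.cos(theta)])

def common_line_direction(phiA, thetaA, phiB, thetaB):
    n = np.cross(projection_direction(phiB, thetaB),
                 projection_direction(phiA, thetaA))
    norm = np.linalg.norm(n)
    if norm < 1e-5:
        return 0.0, 0.0              # (nearly) parallel projections: no unique common line
    n /= norm
    if n[2] < 0:                     # pick the representative with theta < 90 degrees
        n = -n
    theta = np.degrees(np.arccos(np.clip(n[2], -1.0, 1.0)))
    phi = np.degrees(np.arctan2(n[1], n[0]))
    return phi, theta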
[ "def theta_finder(theta, point_a, point_b, point_c, point_c_new):\n x, y, z = parametrized_circle(point_a, point_b, point_c, theta)\n residual = (x - point_c_new[0])**2 + (y - point_c_new[1])**2 + (z - point_c_new[2])**2\n return residual", "def theta_center(zpos, rho, pitch_angle, trap_profile):\n if trap_profile.is_trap == True:\n\n Bmin = trap_profile.field_strength_interp(rho,0)\n Bcurr = trap_profile.field_strength_interp(rho,zpos)\n\n theta_center_calc = math.asin(math.sqrt(Bmin / Bcurr) * math.sin(pitch_angle * math.pi / 180)) * 180 / math.pi\n \n return theta_center_calc\n \n else:\n print(\"ERROR: Given trap profile is not a valid trap\")\n return False", "def collision_distance(phi, theta, xneut, zneut):\n xbar = lambda x: x - xneut\n zbar = lambda z: z - zneut\n a = phi**2 + theta**2\n k = xbar*phi + zbar*theta\n c = xbar**2 + zbar**2 - vacradius**2\n d1 = (-k + sqrt(k**2 - a*c))/a\n d2 = (-k - sqrt(k**2 - a*c))/a\n return d1, d2", "def calc_theta_456(self, base_to_EE_RMat_val, R0_3_val, theta123, px, py, pz, multi_sol_check=False):\n theta1 = theta123[0]\n theta2 = theta123[1]\n theta3 = theta123[2]\n\n # create rotation matrix from link 3 to link 6 using the fact that \n # base_to_EE_RMat = R0_3 * R3_6\n R3_6 = R0_3_val.transpose()*base_to_EE_RMat_val#R0_3_val.inv(\"LU\") * base_to_EE_RMat_val\n\n # use the rotation matrix for the last 3 joints to solve for the three joint angles\n theta4 = atan2(R3_6[2,2], -R3_6[0,2])\n theta5 = atan2(sqrt(R3_6[0,2]*R3_6[0,2] + R3_6[2,2]*R3_6[2,2]), R3_6[1,2])\n\n theta5_neg = atan2(-sqrt(pow(R3_6[0,2],2) + pow(R3_6[2,2],2)), R3_6[1,2])\n theta5_2 = atan2(sqrt(pow(R3_6[1,0],2) + pow(R3_6[1,1],2)), R3_6[1,2]) #equivalent atan2 calculation\n \n theta6 = atan2(-R3_6[1,1], R3_6[1,0]) ## \n\n ### iterate through potential combinations of solutions and compare to desired end effector position\n if multi_sol_check:\n min_error = 1000.0\n min_error_idx = -1\n \n sols = enumerate_sols(self._check_2pi('theta4', [theta4.evalf()]), self._check_2pi('theta5', [theta5.evalf(), theta5_neg.evalf(), theta5_2.evalf()]), self._check_2pi('theta6',[theta6.evalf()]))\n \n for i,s in enumerate(sols):\n print(\"theta4:\\t{}\\ttheta5:\\t{}\\ttheta6:\\t{}\".format(s[0],s[1],s[2]))\n\n T0_G = self.calc_T0_G(theta1,theta2,theta3, s[0],s[1],s[2])\n errors = calc_error(T0_G, px, py, pz)\n print(errors[3])\n if errors[3] < min_error:\n min_error = errors[3]\n min_error_idx = i\n \n return sols[min_error_idx]\n else:\n return (theta4, theta5, theta6)", "def recover_theta_rot(H):\n '''\n Minor annoyance: Recall that there are two separate rotation matrices:\n | cos(R) , sin(R) | | cos(R) , -sin(R) |\n |-sin(R) , cos(R) | | sin(R) , cos(R) | \n In the left one, positive R implies counter-clockwise rotation.\n In the right one, positive R implies clockwise rotation.\n We need to disambiguate this to always return theta s.t.\n Positive theta -> counter-clockwise rotation.\n '''\n H00 = min(max(H[0,0], -1.0), 1.0) # clamp to [-1.0, 1.0] to avoid numerical instability\n H01 = min(max(H[0,1], -1.0), 1.0)\n H10 = min(max(H[0,1], -1.0), 1.0)\n theta_0 = math.degrees(math.acos(H00))\n theta_1 = math.degrees(math.asin(H01))\n if theta_0 >= 0.0 and theta_1 >= 0.0:\n # theta is in counter-clockwise mode\n return theta_0\n else:\n # theta is in clockwise mode\n return -theta_0", "def calc_torsion_phi(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n paC = prev_res.get_atom('C')\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = 
self.get_atom('C')\n return AtomMath.calc_torsion_angle(paC, aN, aCA, aC)", "def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle", "def theta_phi(Collimator_square, sample_point):\n p1,p2,p3,p4=Collimator_square\n\n points = np.array([sample_point-p1, sample_point-p2, sample_point-p3, sample_point-p4])\n points=points.transpose(1,0,2) #shape: (pointsNum,4,3)\n\n theta = np.arctan2(points[:, :, 0],points[:, :, 1] )\n\n norm_x_y=np.sqrt(points[:, :, 0]**2+points[:, :, 1]**2)\n phi = np.arctan2(norm_x_y, points[:, :, 2])\n\n return theta, phi", "def calculate_theta_vals(self) -> None:\n A = np.zeros(self.num_points) # Inappropriate names, but they mirror Knuth's notation.\n B = np.zeros(self.num_points)\n C = np.zeros(self.num_points)\n D = np.zeros(self.num_points)\n R = np.zeros(self.num_points)\n\n # Calculate the entries of the five vectors.\n # Skip first and last point if path is non-cyclic.\n point_ind = range(self.num_points) if self.is_cyclic else range(1, self.num_points - 1)\n for i in point_ind:\n z_h = self.points[i - 1]\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n\n A[i] = z_h.alpha / (z_i.beta ** 2 * z_h.d_val)\n B[i] = (3 - z_h.alpha) / (z_i.beta ** 2 * z_h.d_val)\n C[i] = (3 - z_j.beta) / (z_i.alpha ** 2 * z_i.d_val)\n D[i] = z_j.beta / (z_i.alpha ** 2 * z_i.d_val)\n R[i] = -B[i] * z_i.psi - D[i] * z_j.psi\n\n # Set up matrix M such that the soln. Mx = R are the theta values.\n M = np.zeros((self.num_points, self.num_points))\n for i in range(self.num_points):\n # Fill i-th row of M\n M[i][i - 1] = A[i]\n M[i][i] = B[i] + C[i]\n M[i][(i + 1) % self.num_points] = D[i]\n\n # Special formulas for first and last rows of M with non-cyclic paths.\n if not self.is_cyclic:\n # First row of M\n alpha_0 = self.points[0].alpha\n beta_1 = self.points[1].beta\n xi_0 = (alpha_0 ** 2 * self.begin_curl) / beta_1 ** 2\n M[0][0] = alpha_0 * xi_0 + 3 - beta_1\n M[0][1] = (3 - alpha_0) * xi_0 + beta_1\n R[0] = -((3 - alpha_0) * xi_0 + beta_1) * self.points[1].psi\n # Last row of M\n alpha_n_1 = self.points[-2].alpha\n beta_n = self.points[-1].beta\n xi_n = (beta_n ** 2 * self.end_curl) / alpha_n_1 ** 2\n M[-1][-2] = (3 - beta_n) * xi_n + alpha_n_1\n M[-1][-1] = (beta_n * xi_n + 3 - alpha_n_1)\n R[-1] = 0\n\n # Solve for theta values.\n thetas = np.linalg.solve(M, R)\n for i, point in enumerate(self.points):\n point.theta = thetas[i]", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def determine_in_plane_angle(self, qxy, qz=0.0, theta_incident=0.0):\n \n k = self.get_k()\n if theta_incident==None:\n # Use internal value\n theta_incident = self.theta_incident\n theta_incident_rad = np.radians(theta_incident)\n \n from scipy.optimize import fsolve\n \n def equations(p, qxy=qxy, qz=qz, theta_incident=theta_incident, k=k):\n \n # The variable we are fitting for\n omega_rad, = p\n \n # Non-fit values: qxy, qz, k, theta_incident, k\n \n return ( (qxy*cos(omega_rad))**2 + (qxy*sin(omega_rad)+k*cos(theta_incident_rad))**2 + (qz-k*sin(theta_incident_rad))**2 - k**2 )\n\n \n omega_rad, = fsolve(equations, ( np.radians(5.0) ) )\n #print( 'omega_rad = %.2f (err = %.4f)' % ( omega_rad, 
equations((omega_rad, )) ) )\n \n omega = abs( np.degrees(omega_rad) )\n #print( 'omega = %.2f (err = %.4f)' % ( omega, equations((omega_rad, )) ) )\n \n \n return omega", "def calculate_parabolic(radius_throat, area_ratio, theta_i, \n theta_exit, percent_length_conical):\n\n # First curve (fc)\n # (Nozzle length equation taken from source [5])\n length_nozzle = percent_length_conical * radius_throat * ((math.sqrt(area_ratio) - 1) \\\n + 1.5 * ((1 / math.cos(15 * math.pi / 180)) - 1)) / math.tan(15 * math.pi / 180)\n\n angle_fc = -(math.pi + (45 * math.pi / 180))\n N_STEPS_FC = 300\n step_fc = (-math.pi / 2 - angle_fc) / N_STEPS_FC\n theta_fc = np.arange(-3 / 4 * math.pi, -math.pi / 2 + 0.001, step_fc)\n\n x_fc = np.cos(theta_fc) * 1.5 * radius_throat\n y_fc = np.sin(theta_fc) * 1.5 * radius_throat + (1.5 * radius_throat\n + radius_throat)\n\n # np.savetxt('firstCurve.csv', (x_fc, y_fc), delimiter=\";\")\n\n # Second curve (sc)\n angle_sc = -math.pi / 2\n N_STEPS_SC = 300\n step_sc = theta_i / N_STEPS_SC\n theta_sc = np.arange(angle_sc, theta_i - math.pi / 2 + step_sc, step_sc)\n\n x_sc = np.cos(theta_sc) * 0.382 * radius_throat\n y_sc = np.sin(theta_sc) * 0.382 * radius_throat + (0.382 * radius_throat\n + radius_throat)\n\n # np.savetxt('secondCurve.csv', (x_sc, y_sc), delimiter=\";\")\n\n # Third curve (tc)\n x_sc_endpoint = math.cos(theta_i - math.pi / 2) * 0.382 * radius_throat\n y_sc_endpoint = math.sin(theta_i - math.pi / 2) * 0.382 * radius_throat \\\n + (0.382 * radius_throat + radius_throat)\n\n y_exit = math.sqrt(area_ratio) * radius_throat\n\n matrix_y = np.array([[y_sc_endpoint ** 2, y_sc_endpoint, 1], \n [y_exit ** 2, y_exit, 1],\n [2 * y_sc_endpoint, 1, 0]])\n matrix_x = np.array([x_sc_endpoint, length_nozzle, 1 / math.tan(theta_i)])\n inverse_matrix_y = np.linalg.inv(matrix_y)\n parabola_coefficients = inverse_matrix_y.dot(matrix_x) \n\n coefficient_a = parabola_coefficients[0]\n coefficient_b = parabola_coefficients[1]\n coefficient_c = parabola_coefficients[2]\n\n STEPSIZE_Y_TC = 0.001\n y_tc = np.arange(y_sc_endpoint, y_exit + STEPSIZE_Y_TC, STEPSIZE_Y_TC)\n x_tc = coefficient_a * y_tc**2 + coefficient_b * y_tc + coefficient_c\n\n # np.savetxt('thirdCurve.csv', (x_tc, y_tc), delimiter=\";\")\n\n x_nozzle = np.concatenate((x_fc,x_sc, x_tc),axis=0)\n y_nozzle = np.concatenate((y_fc,y_sc, y_tc),axis=0)\n\n np.savetxt('rao_thrust_optimized_parabola.csv',\n (x_nozzle, y_nozzle), delimiter=\";\")\n\n export_parabolic(x_nozzle, y_nozzle)\n\n plot_parabolic(x_nozzle, y_nozzle, radius_throat, theta_i, theta_exit,\n x_sc_endpoint, y_sc_endpoint, y_exit, length_nozzle)\n\n return length_nozzle", "def theta_transversal(hoy:np.array=HOYS_DEFAULT)->float : \n\n return np.arctan(np.sin(np.radians(azim(hoy))) * np.tan(np.radians(sgh.z(hoy))))", "def find_omega_theta(R):\n #YOUR CODE HERE\n\n theta = math.acos((np.trace(R) - 1)/2)\n omega = (1/(2*math.sin(theta)))*np.array([R[2,1]-R[1,2], R[0,2]-R[2,0],R[1,0]-R[0,1]])\n return np.array([omega,theta])", "def Distance2RRhoPhi(r1,r2,r3):\n \n # Calculate the square-distances of \n # each pair of atoms.\n r1 = np.array(r1)\n r2 = np.array(r2) \n r3 = np.array(r3)\n \n rr1 = r1*r1\n rr2 = r2*r2\n rr3 = r3*r3\n \n return TriatomicRadialPolar.DistanceSquared2RRhoPhi(rr1,rr2,rr3)", "def compute_traj_form_joystick_rel(joystick):\n centre, direction = joystick[0], joystick[1]\n direction = unitary_vec(direction)\n print(joystick)\n traj = []\n r = []\n V = get_verins_12()\n pos = direct_rel_12(V)\n # Computing radii\n for leg in range(4):\n radius 
= np.sqrt((pos[leg * 3 + 0] - centre[0]) ** 2 + (pos[leg * 3 + 1] - centre[1]) ** 2)\n r.append(radius)\n r_max = max(r)\n n = int(2 * np.pi * r_max / TRAJ_ACCUR)\n for i in range(n - 10):\n L = []\n for leg in range(4):\n alpha = np.arccos(abs((centre[1] - pos[3 * leg + 1])) / r[leg])\n signe_cx = (centre[0] - pos[leg * 3 + 0]) / abs(centre[0] - pos[leg * 3 + 0])\n signe_cy = (centre[1] - pos[leg * 3 + 1]) / abs(centre[1] - pos[leg * 3 + 1])\n if signe_cx < 0 and signe_cy < 0:\n alpha = + np.pi / 2 - alpha\n if signe_cx > 0 and signe_cy < 0:\n alpha = + np.pi / 2 + alpha\n if signe_cx < 0 and signe_cy > 0:\n alpha = - np.pi / 2 + alpha\n if signe_cx > 0 and signe_cy > 0:\n alpha = - np.pi / 2 - alpha\n L = np.append(L, (r[leg] * np.cos((2 * i * np.pi) / n + alpha) + centre[0],\n r[leg] * np.sin((2 * i * np.pi) / n + alpha) + centre[1],\n pos[3 * leg + 2]))\n traj.append(L)\n return traj", "def circle_on_the_sky(ls,bs,th,n_points=100):\n from scipy.spatial.transform import Rotation as R\n\n thr = np.deg2rad(th)\n\n # start from the circle centre point at galactic coordiantes 0/0 on that sphere\n # TS: the difficult thing is just to figure out what angle corresponds to what axis\n vec = np.array([np.cos(thr),0,0])\n # rotate that point to the wanted position\n r = R.from_euler('yz',[bs+180,ls+180],degrees=True)\n rot_vec = r.apply(vec)\n # initial and rotated point are NOT UNIT VECTORS, thus normalise when required\n\n # get points of that circle (radius sin(th), AT position cos(th))\n alpha = np.linspace(-np.pi,np.pi,n_points)\n circle_vec = np.array([np.ones(len(alpha))*np.cos(thr),\n np.sin(thr)*np.cos(alpha),\n np.sin(thr)*np.sin(alpha)])\n # rotate these points in the same way\n rot_circle_vec = []\n for i in range(len(alpha)):\n rot_circle_vec.append(r.apply(circle_vec[:,i]))\n rot_circle_vec = np.array(rot_circle_vec).T\n # should not happen, but let's make sure\n rot_circle_vec[2,rot_circle_vec[2,:] < -1] = -1\n \n # calculate l and b coordiantes from (cartesian to spherical on unit sphere)\n b_calc = np.rad2deg(np.arcsin(rot_circle_vec[2,:]/\n vector_length(rot_circle_vec[0,:],\n rot_circle_vec[1,:],\n rot_circle_vec[2,:])))\n l_calc = np.rad2deg(np.arctan2(rot_circle_vec[1,:],rot_circle_vec[0,:]))\n \n return l_calc,b_calc", "def getTwoTheta(self):\n BEAM = vec3(self.dict[\"beam\"])\n ROT = vec3(self.dict[\"rot\"]).normalize()\n camY = ROT.cross(BEAM)\n\n XDSdetector_X = vec3(self.dict[\"detector_X\"]).normalize()\n XDSdetector_Y = vec3(self.dict[\"detector_Y\"]).normalize()\n #XDSdetector_Z = XDSdetector_X.cross(XDSdetector_Y)\n\n #print beam.angle(XDSdetector_Z)*r2d\n if abs(ROT * XDSdetector_X) - 1 <= 0.05:\n detecorVector = -XDSdetector_Y\n #print 1\n elif abs(ROT * XDSdetector_Y) - 1 <= 0.05:\n detecorVector = XDSdetector_X\n #print 2\n else:\n raise Exception, \"Can't calculate TwoTheta angle\" \n return camY.angle(detecorVector)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine 2D alignment parameters, including mirror.
def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):
    t1 = Transform({"type":"2D", "alpha":alpha1, "tx":sx1, "ty":sy1, "mirror":mirror1, "scale":1.0})
    t2 = Transform({"type":"2D", "alpha":alpha2, "tx":sx2, "ty":sy2, "mirror":mirror2, "scale":1.0})
    tt = t2*t1
    d = tt.get_params("2D")
    return d["alpha"], d["tx"], d["ty"], d["mirror"]
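For readers without EMAN2, the composition above can be mimicked with 3x3 homogeneous matrices in NumPy. The rotate-then-shift order and the x-axis mirror below are assumptions about the 2D convention; treat this as an illustration of how such parameter sets compose, not a drop-in replacement for `Transform`.

import numpy as np

def params_to_matrix(alpha_deg, tx, ty, mirror):
    # Rotate by alpha, shift by (tx, ty), then optionally mirror along x (assumed convention).
    a = np.radians(alpha_deg)
    m = np.array([[ np.cos(a), np.sin(a), tx],
                  [-np.sin(a), np.cos(a), ty],
                  [0.0, 0.0, 1.0]])
    return (np.diag([-1.0, 1.0, 1.0]) @ m) if mirror else m

def matrix_to_params(m):
    mirror = int(np.linalg.det(m[:2, :2]) < 0)
    work = np.diag([-1.0, 1.0, 1.0]) @ m if mirror else m   # strip the flip to read angle and shift
    alpha = np.degrees(np.arctan2(work[0, 1], work[0, 0])) % 360.0
    return alpha, work[0, 2], work[1, 2], mirror

def combine_params2_sketch(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):
    # Apply transform 1 first, then transform 2, matching t2*t1 above.
    total = params_to_matrix(alpha2, sx2, sy2, mirror2) @ params_to_matrix(alpha1, sx1, sy1, mirror1)
    return matrix_to_params(total)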
[ "def _align_by_DAPI(data_1, data_2, channel_index=0, upsample_factor=2):\n images = data_1[channel_index], data_2[channel_index]\n _, offset = ops.process.Align.calculate_offsets(images, upsample_factor=upsample_factor)\n offsets = [offset] * len(data_2)\n aligned = ops.process.Align.apply_offsets(data_2, offsets)\n return aligned", "def rmsd_align(x1, x2):\n\n assert x1.shape == x2.shape\n\n x1 = x1 - jnp.mean(x1, axis=0)\n x2 = x2 - jnp.mean(x2, axis=0)\n\n rotation = get_optimal_rotation(x1, x2)\n\n xa = x1\n xb = x2 @ rotation\n\n return xa, xb", "def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image", "def test_align(self):\n al = align(self.amp1, self.amp2).m\n\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert.mean(axis=0)[0], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[1], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[2], 0, delta=TestAlign.DELTA)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def new_mirror(self,alongx,alongy):\n Knew = K.clone()\n if alongx:\n Knew[0,2] = size[0]-Knew[0,2]\n if alongy:\n Knew[1,2] = size[1]-Knew[1,2]\n return CameraInfo(self.size,Knew,self.dist)", "def affine_align(x, y, p1, p2, g, s):\n #Create M, Ix, and Iy as Y x X matrices of 0's\n M = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Ix = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Iy = [[0]*(len(x)+1) for i in range(len(y)+1)]\n #Set up initial values for Ix and Iy\n #M infs along both axes\n for i in range(1, len(y)+1):\n M[i][0] = -math.inf\n for j in range(1, len(x)+1):\n M[0][j] = -math.inf\n #Ix: Aligning X with gap, horizontal move, infs along top row\n for i in range(0, len(y)+1):\n Ix[i][0] = -math.inf\n #Gap penalties along left column\n for j in range(1, len(x)+1):\n Ix[0][j] = -g if Ix[0][j-1] == -math.inf else Ix[0][j-1] - s\n #Iy: Aligning Y with gap, vertical move, infs along left column\n for j in range(0, len(x)+1):\n Iy[0][j] = -math.inf\n #Gap penalties along top row\n for i in range(1, len(y)+1):\n Iy[i][0] = -g if Iy[i-1][0] == -math.inf else Iy[i-1][0] - s\n #Populate remaining cells\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n M[i][j] = max(M[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Ix[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Iy[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2))\n Ix[i][j] = max(M[i][j-1] - g,\n Ix[i][j-1] - s)\n Iy[i][j] = max(M[i-1][j] - g,\n Iy[i-1][j] - s)\n #TRACEBACK\n x_ret=\"\"; y_ret=\"\"\n i = len(y); j = len(x)\n #Determine start matrix\n align_scores = (M[i][j], Iy[i][j], Ix[i][j])\n matrix_idx = align_scores.index(max(align_scores))\n #matrix_key will track the current matrix through the traceback\n matrix_key = [\"M\", \"Iy\", \"Ix\"][matrix_idx]\n while i > 0 and 
j > 0:\n #From M: Check diagonal moves back to all three matrices, align characters\n if matrix_key == \"M\":\n if M[i][j] == M[i-1][j-1] + p1 or M[i][j] == M[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"M\"\n elif M[i][j] == Iy[i-1][j-1] + p1 or M[i][j] == Iy[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Iy\"\n elif M[i][j] == Ix[i-1][j-1] + p1 or M[i][j] == Ix[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Ix\"\n #From Iy: Check vertical move to Iy and M, align y character with x gap\n elif matrix_key == \"Iy\":\n if Iy[i][j] == M[i-1][j] - g:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"M\"\n elif Iy[i][j] == Iy[i-1][j] - s:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"Iy\"\n #From Ix: Check horizontal move to Ix and M, align x character with y gap\n elif matrix_key == \"Ix\":\n if Ix[i][j] == M[i][j-1] - g:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"M\"\n elif Ix[i][j] == Ix[i][j-1] - s:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"Ix\"\n #Finish sequence if edge was reached\n #i>0 means mach remaining characters in y with gaps in x\n if i > 0:\n x_ret = (\"_\"*i) + x_ret\n y_ret = y[0:i] + y_ret\n #j>0 means mach remaining characters in x with gaps in y\n if j > 0:\n x_ret = x[0:j] + x_ret\n y_ret = (\"_\"*j) + y_ret\n #Return alinged strings\n return (x_ret, y_ret)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def _le_annot_parms(self, annot, p1, p2, fill_color):\n w = annot.border[\"width\"] # line width\n sc = annot.colors[\"stroke\"] # stroke color\n if not sc: # black if missing\n sc = (0,0,0)\n scol = \" \".join(map(str, sc)) + \" RG\\n\"\n if fill_color:\n fc = fill_color\n else:\n fc = annot.colors[\"fill\"] # fill color\n if not fc:\n fc = (1,1,1) # white if missing\n fcol = \" \".join(map(str, fc)) + \" rg\\n\"\n # nr = annot.rect\n np1 = p1 # point coord relative to annot rect\n np2 = p2 # point coord relative to annot rect\n m = Matrix(self._hor_matrix(np1, np2)) # matrix makes the line horizontal\n im = ~m # inverted matrix\n L = np1 * m # converted start (left) point\n R = np2 * m # converted end (right) point\n if 0 <= annot.opacity < 1:\n opacity = \"/H gs\\n\"\n else:\n opacity = \"\"\n return m, im, L, R, w, scol, fcol, opacity", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in 
[0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def test_align_points(self):\n mv = [\n [0, 0, 5],\n [5, 0, 5],\n [0, 5, 5]\n ]\n sv = [\n [0, 0, 0],\n [5, 0, 0],\n [0, 5, 0]\n ]\n al = align(self.amp1, self.amp2, mv=mv, sv=sv, method='contPoints').m\n zMax = self.amp1.vert[:, 2].max() - 5\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert[:, 2].max(), zMax, delta=TestAlign.DELTA)", "def _align_by_channel(data_1, data_2, channel_index1=0, channel_index2=0, upsample_factor=1, data_1_keepchannels=None, data_2_keepchannels=None):\n\n # add new axis to single-channel images\n if data_1.ndim == 2:\n data_1 = data_1[np.newaxis,:]\n if data_2.ndim == 2:\n data_2 = data_2[np.newaxis,:]\n\n print('aligning')\n images = data_1[channel_index1], data_2[channel_index2]\n _, offset = ops.process.Align.calculate_offsets(images, upsample_factor=upsample_factor)\n offsets = [offset] * len(data_2)\n aligned = ops.process.Align.apply_offsets(data_2, offsets)\n\n if (data_1_keepchannels == None) & (data_2_keepchannels != None):\n aligned = aligned[data_2_keepchannels,:]\n if aligned.ndim == 2:\n aligned = aligned[np.newaxis,:]\n print(data_1.shape)\n print(aligned.shape)\n\n elif (data_1_keepchannels != None) & (data_2_keepchannels == None):\n data_1 =data_1[data_1_keepchannels,:]\n if data_1.ndim == 2:\n data_1 = data_1[np.newaxis,:]\n\n else:\n data_1 =data_1[data_1_keepchannels,:]\n aligned = aligned[data_2_keepchannels,:]\n if aligned.ndim == 2:\n aligned = aligned[np.newaxis,:]\n if data_1.ndim == 2:\n data_1 = data_1[np.newaxis,:]\n\n aligned = np.vstack((data_1, aligned))\n print(aligned.shape)\n return aligned", "def align(self):\n ...", "def alignment_params(screen):\r\n \r\n intersections = []\r\n alignment_param = 0\r\n \r\n \r\n moves = {0:(0,1), 1: (0,-1), 2: (1, 0), 3: (-1, 0)}\r\n \r\n for tile in screen:\r\n intersect = True\r\n pos_x, pos_y = tile\r\n \r\n if screen[tile] == 35:\r\n for i in range(4):\r\n dx, dy = moves[i]\r\n test_pos = (pos_x + dx, pos_y + dy)\r\n if test_pos not in screen or screen[test_pos] != 35:\r\n intersect = False\r\n break\r\n \r\n if intersect:\r\n intersections.append(tile)\r\n alignment_param += pos_x * pos_y\r\n \r\n for tile in intersections:\r\n screen[tile] = 48\r\n \r\n return screen, alignment_param", "def alignment(gram1, gram2):\n # BUG: this loss function causes abnormal optimization behaviors, see\n # comments in past commits\n\n alignment = frobenius_inner_prod(gram1, gram2) /\\\n m.sqrt(frobenius_inner_prod(gram1, gram1) *\n frobenius_inner_prod(gram2, gram2))\n return alignment", "def align(stroke1, stroke2):\n\n x1 = np.array(stroke1.x)\n x2 = np.array(stroke2.x)\n y1 = np.array(stroke1.y)\n y2 = np.array(stroke2.y)\n\n d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n m = d - np.min(d)\n\n Ix1 = np.argmax(x1)\n Ix2 = np.argmax(x2)\n Iy1 = np.argmax(y1)\n Iy2 = np.argmax(y2)\n\n ix1 = np.argmin(x1)\n ix2 = np.argmin(x2)\n iy1 = np.argmin(y1)\n iy2 = np.argmin(y2)\n\n # rephasing :\n u = np.array([(Ix1 - Ix2), (Iy1 - Iy2), (ix1 - ix2), (iy1 - iy2)])\n indice_period = np.argmin(np.abs(u))\n period = u[indice_period]\n new_x1 = np.array(x1[period:].tolist() + x1[0:period].tolist())\n new_y1 = np.array(y1[period:].tolist() + 
y1[0:period].tolist())\n x1 = new_x1\n y1 = new_y1\n\n # resorting : if symetric part, revert it\n mx = np.max((x1, x2), 0)\n my = np.max((y1, y2), 0)\n sym_score = abs(x1 - x2[::-1]) + abs(y1 - y2[::-1])\n if len(x1[sym_score < 50]) > 20:\n x1[sym_score < 40] = x1[sym_score < 40][::-1]\n y1[sym_score < 40] = y1[sym_score < 40][::-1]\n\n new_d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n new_m = new_d - min(new_d)\n\n return x1, y1, d, new_d, m, new_m", "def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a text file that is composed of columns of numbers into a spider doc file
def create_spider_doc(fname,spiderdoc): from string import atoi,atof infile = open(fname,"r") lines = infile.readlines() infile.close() nmc = len(lines[0].split()) table=[] for line in lines: data = line.split() for i in xrange(0,nmc): data[i] = atof(data[i]) table.append(data) drop_spider_doc(spiderdoc ,table)
[ "def parse_docx_file(path, name):\n document = open_file(path)\n i = 0\n with open(name, 'w', encoding='utf-8') as file:\n fieldnames = ['word', 'meaning']\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n for table in document.tables:\n iterator = iter(table.rows)\n while i < len(table.rows):\n extract_row(iterator, writer)\n i += 1\n print_percentage(i, table)", "def preprocess_text(xml_filepath, start_index, end_index):\n \n with open(xml_filepath, \"r\") as file:\n #lines = file.readlines()\n \n #instead of reading the whole file we go for (end_index+100) lines\n number_of_lines = end_index+200 #Extra lines for initial non-row lines and blank bodies\n lines = []\n for i in range(number_of_lines):\n line = file.readline()\n if(len(line)==0): #EOF\n break\n lines.append(line)\n \n lines = \"\".join(lines)\n lines_bs = bs(lines, \"lxml\")\n \n row_bodies = [x.get(\"body\") for x in lines_bs.find_all(\"row\")]\n row_bodies_tag_free = []\n\n for row_body in row_bodies:\n #text = bs(row_body,'html.parser').get_text().strip()\n text = bs(row_body,'lxml').get_text().strip()\n if(len(text)>0): #Discarding empty bodies\n row_bodies_tag_free.append(text)\n \n row_bodies_tag_free = row_bodies_tag_free[start_index:end_index] #array of documents<each in form of string>\n \n \n tokenized_documents = []\n for row_body_tag_free in row_bodies_tag_free:\n tokenized_documents.append(process_document(row_body_tag_free))\n \n return tokenized_documents", "def doc_to_df(self, doc_no):\n doc_txt = pd.DataFrame()\n i = 1\n with open ('{doc_id}.txt'.format(doc_id = doc_no)) as file:\n for line in file:\n words = pd.Series(line.split(' '))\n doc_txt = doc_txt.append(words, ignore_index=True)\n return doc_txt", "def read_collection(file_path):\n\n\twith open(file_path, 'r') as wholeFile:\n\t\ttext = wholeFile.read()\n\t\ttext = text.replace('</DOC>', '</DOC> BREAK_NEW_DOC')\n\t\tdocuments = text.split('BREAK_NEW_DOC')\n\treturn documents", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def parse(file_name, int_cols):\n data = []\n with open(file_name) as f:\n headers = f.readline().strip().split(',')\n num_cols = len(headers)\n\n for line in f.readlines():\n row_data = line.strip().split(',')\n row = {}\n for i in range(num_cols):\n if headers[i] in int_cols:\n row[headers[i]] = int(row_data[i])\n else:\n row[headers[i]] = row_data[i]\n data.append(row)\n return data", "def parse_file(file_name):\n with open(file_name, 'r') as f:\n all_lines = f.readlines()\n\n raw_markdown = ''.join(all_lines)\n\n mistle_doc = Document(raw_markdown)\n\n return mistle_doc", "def bulk_txt_processor(file, delimiter):\n try:\n make_directory(\"temp\")\n with open(file, 'r', encoding='utf-8') as fh:\n output = list()\n for line in fh.readlines():\n genre = re.match(r'(.*)<(.*)>\\s(.*)', line)[2]\n print(genre)\n artists = re.match(r'(.*)<(.*)>\\s(.*)', line)[3].split(delimiter)\n print(artists)\n _index = 0\n while _index <= len(artists)//10:\n selected_artists = artists[_index*10:(_index+1)*10] # extract 10 artists each time\n string_of_artists = \",\".join(selected_artists)\n with open(os.path.join('temp', f'{genre}_{_index+1}.txt'), 'w+', encoding='utf-8') as f:\n 
f.write(string_of_artists)\n f.close()\n output.append(f\"{genre}_{_index+1}\")\n _index += 1\n fh.close()\n return output\n except Exception as e:\n logger.error(\"bulk processing failed! \"+str(e))\n print(e)", "def file_parser(filename,skip='#',sep=None):\n lines = file_to_list(filename)\n splitted = []\n for line in lines:\n splitted.append(floats_from_string(line,sep=sep))\n\n columns = np.array(splitted).transpose()\n return columns", "def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data", "def __bert_text_to_index(self, file_path: str):\r\n data_ids = []\r\n data_types = []\r\n label_ids = []\r\n data_masks = []\r\n with open(file_path, 'r',encoding='UTF-8') as f:\r\n line_data_ids = []\r\n line_data_types = []\r\n line_label = []\r\n line_mask = []\r\n for line in f:\r\n if line != '\\n':\r\n w, t = line.split()\r\n # bert 需要输入index和types 由于是单语句模型,所以type都为0\r\n w_index = self.w2i.get(w, self.unk_index)\r\n t_index = self.tag2index.get(t, 0)\r\n line_data_ids.append(w_index) # index\r\n line_data_types.append(0) # types\r\n line_label.append(t_index) # label index\r\n line_mask.append(0) # we don't mask\r\n else:\r\n # 处理填充开始和结尾 bert 输入语句每个开始需要填充[CLS] 结束[SEP]\r\n max_len_buff = self.max_len-2\r\n if len(line_data_ids) > max_len_buff: # 先进行截断\r\n line_data_ids = line_data_ids[:max_len_buff]\r\n line_data_types = line_data_types[:max_len_buff]\r\n line_label = line_label[:max_len_buff]\r\n line_mask = line_mask[:max_len_buff]\r\n line_data_ids = [self.cls_index] + line_data_ids + [self.sep_index]\r\n line_data_types = [0] + line_data_types + [0]\r\n line_label = [0] + line_label + [0]\r\n line_mask = [0] + line_mask + [0]\r\n\r\n # padding\r\n if len(line_data_ids) < self.max_len: # 填充到最大长度\r\n pad_num = self.max_len - len(line_data_ids)\r\n line_data_ids = [self.pad_index]*pad_num + line_data_ids\r\n line_data_types = [0] * pad_num + line_data_types\r\n line_label = [0] * pad_num + line_label\r\n line_mask = [0] * pad_num + line_mask\r\n data_ids.append(np.array(line_data_ids))\r\n data_types.append(np.array(line_data_types))\r\n label_ids.append(np.array(line_label))\r\n data_masks.append(np.array(line_mask))\r\n line_data_ids = []\r\n line_data_types = []\r\n line_label = []\r\n line_mask = []\r\n print(\"data_ids shape:\"+str(np.array(data_ids).shape))\r\n print(\"data_types shape:\"+str(np.array(data_types).shape))\r\n print(\"data_masks shape:\"+str(np.array(data_masks).shape))\r\n return [np.array(data_ids), np.array(data_types), np.array(data_masks)], np.array(label_ids)", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n X = loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n return fast_sparse_matrix(X)", "def file_line_numbering(file, numbered_file):\n with open(file) as f:\n with open(numbered_file, 'w') as n_f:\n text = f.read().splitlines()\n for line in text:\n line_number = text.index(line)\n new_line = str(line_number).zfill(4) + \" \" + line\n n_f.write(new_line + '\\n')\n \n print(text)", "def 
read_text_file(url_pattern, real_colnames):\n data = sc.textFile(url_pattern)\n data_1 = data.map(lambda x: x.split('|')).map(lambda x: [i.encode('utf-8') for i in x])\n df = sqlContext.createDataFrame(data_1)\n for c, n in zip(df.columns, real_colnames):\n df = df.withColumnRenamed(c, n)\n return df", "def text_to_columns(text: str) -> str:\n # split into paragraphs by double newlines\n paragraphs = text.split('\\n\\n')\n # strip whitespace\n paragraphs = [txt.strip() for txt in paragraphs]\n\n wrapper = TextWrapper(width=COL_WIDTH)\n paragraphs = [wrapper.wrap(txt) for txt in paragraphs]\n paragraphs = [_format_lines(paragraph) for paragraph in paragraphs]\n\n if len(paragraphs) == 1:\n return '\\n'.join(paragraphs[0])\n elif len(paragraphs) == 2:\n a, b = paragraphs\n return _zip_join(a, b)\n elif len(paragraphs) == 3:\n a, b, c = paragraphs\n return _zip_join(a, b, c)\n elif len(paragraphs) == 4:\n a, b, c, d = paragraphs\n return _zip_join(a, b, c, d)\n else:\n print(\"I'm not quite sure how to scale this.\")", "def import_from_txt(self, file):\n\n with open(file, 'r') as f:\n self.lines = [f.readline().strip()[:80].ljust(80) for i in range(40)]\n\n self.text = ''.join(self.lines)", "def text_to_columns(text):\n parags = text.split(\"\\n\\n\")\n blocks = []\n for p in parags:\n block = splitter(p)\n blocks.append(block)\n output = \"\"\n for linechunks in zip_longest(*blocks, fillvalue=\"\"):\n line = \"\"\n for lc in linechunks[:-1]:\n line += lc + (COL_WIDTH + COL_SPACE - len(lc)) * \" \"\n line += linechunks[-1]\n output += line + \"\\n\"\n return output", "def import_txt(node, filename, index=None, task=None):\n\n # TODO: handle spaces correctly\n\n if task is None:\n # create dummy task if needed\n task = tasklib.Task()\n \n\n child = node.new_child(notebooklib.CONTENT_TYPE_PAGE, \n os.path.basename(filename), index)\n child.set_attr(\"title\", os.path.basename(filename)) # remove for 0.6.4\n\n lines = open(filename).readlines()\n \n out = safefile.open(child.get_data_file(), \"w\", codec=\"utf-8\")\n out.write(u\"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\"><body>\"\"\")\n\n lines = [escape_whitespace(escape(line)) for line in lines]\n text = \"\".join(lines)\n\n # replace newlines\n text = text.replace(u\"\\n\", u\"<br/>\")\n text = text.replace(u\"\\r\", u\"\")\n\n out.write(text)\n out.write(u\"</body></html>\")\n\n out.close()\n task.finish()", "def file_to_list(file_to_read): #comment more\n\n nested_list = []\n first_line = file_to_read.readline()\n first_line = first_line.split()\n rows = len(first_line)\n for lines in file_to_read:\n lines = lines.split()\n\n if len(lines) > rows:\n lines[0] = lines[0] + \" \" + lines[1]\n del lines[1]\n\n for ind in range (1,len(lines)):\n lines[ind] = int(lines[ind])\n\n nested_list.append(lines)\n nested_list.insert(0,first_line)\n return nested_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output the data in slice iz, row ix of an image to standard out.
def dump_row(input, fname, ix=0, iz=0): fout = open(fname, "w") image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() fout.write("# z = %d slice, x = %d row)\n" % (iz, ix)) line = [] for iy in xrange(ny): fout.write("%d\t%12.5g\n" % (iy, image.get_value_at(ix,iy,iz))) fout.close()
[ "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def test_display_image(data_dir):\n epi_img = nib.load(data_dir)\n epi_img_data = epi_img.get_data()\n slice_1 = epi_img_data[73:106, 93:126, 95]\n slice_2 = epi_img_data[:, :, 95]\n preprocess.show_slices([slice_1, slice_2])", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def display_img_info(img) :\n print (img.shape, img.ndim, img.dtype.name, type(img))", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n 
plt.close()", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def _write_cutout(self,\n iobj,\n icut,\n cutout_hdu,\n im_data,\n cutout_type):\n\n if cutout_type=='psf':\n start_row = self.obj_data['psf_start_row'][iobj,icut]\n else:\n start_row = self.obj_data['start_row'][iobj,icut]\n\n cutout_hdu.write(im_data, start=start_row)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if 
len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def dump(arr, filename):\n permutation = tuple(range(arr.ndim)[::-1])\n if np.all(np.array(arr.shape) < 65536 // 2): # OK to go with Nifti1\n i = nib.Nifti1Image(arr.transpose(permutation), np.eye(4))\n else: # only with Nifti2\n i = nib.Nifti2Image(arr.transpose(permutation), np.eye(4))\n nib.save(i, filename)", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def niiwrite_nv(data,mask,temp_path,aff,temp_header):\n Nx,Ny,Nz = mask.shape\n if (data.ndim ==1):\n temp = np.zeros((Nx,Ny,Nz),order='F')\n temp[mask] = data\n if (data.ndim ==2):\n _,Nt = data.shape\n temp = np.zeros((Nx,Ny,Nz,Nt),order='F')\n temp[mask,:] = data\n if (data.ndim ==3):\n Nv, Ne, Nt = data.shape\n temp = np.zeros((Nx,Ny,Nz,Nt),order='F')\n temp[mask,:] = np.squeeze(data[:,0,:])\n for e in range(1,Ne):\n aux = np.zeros((Nx,Ny,Nz,Nt),order='F')\n aux[mask,:] = np.squeeze(data[:,e,:])\n temp = np.concatenate((temp,aux),axis=2)\n\n outni = nib.Nifti1Image(temp,aff,header=temp_header)\n outni.to_filename(temp_path)\n print(\" + Dataset %s written to disk\" % (temp_path))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of Euler angles suitable for projections. method is either 'S' for the Saff algorithm or 'P' for the Penczek '94 algorithm; 'S' assumes phi1 > delta. symmetry: if this is set to a pointgroup symmetry (cn or dn) or a helical symmetry with pointgroup symmetry (scn or sdn), it will yield angles from the asymmetric unit, not the specified range.
def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = "Minus", symmetry='c1'): from math import pi, sqrt, cos, acos, tan, sin from utilities import even_angles_cd from string import lower,split angles = [] symmetryLower = symmetry.lower() symmetry_string = split(symmetry)[0] if (symmetry_string[0] == "c"): if(phi2 == 359.99): angles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi) if(int(symmetry_string[1:]) > 1): if( int(symmetry_string[1:])%2 ==0): qt = 360.0/int(symmetry_string[1:]) else: qt = 180.0/int(symmetry_string[1:]) n = len(angles) for i in xrange(n): t = n-i-1 if(angles[t][1] == 90.0): if(angles[t][0] >= qt): del angles[t] else: angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi) elif(symmetry_string[0] == "d"): if(phi2 == 359.99): angles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi) if (int(symmetry_string[1:])%2 == 0): qt = 360.0/2/int(symmetry_string[1:]) else: qt = 180.0/2/int(symmetry_string[1:]) n = len(angles) for i in xrange(n): t = n-i-1 if(angles[t][1] == 90.0): if(angles[t][0] >= qt): del angles[t] else: angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi) elif(symmetry_string[0] == "s"): #if symetry is "s", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2 # for helical, theta1 cannot be 0.0 if theta1 > 90.0: ERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1) if theta1 == 0.0: theta1 =90.0 theta_number = int((90.0 - theta1)/theta2) #for helical, symmetry = s or scn cn = int(symmetry_string[2:]) for j in xrange(theta_number,-1, -1): if( j == 0): if (symmetry_string[1] =="c"): if cn%2 == 0: k=int(359.99/cn/delta) else: k=int(359.99/2/cn/delta) elif (symmetry_string[1] =="d"): if cn%2 == 0: k=int(359.99/2/cn/delta) else: k=int(359.99/4/cn/delta) else: ERROR("For helical strucutre, we only support scn and sdn symmetry","even_angles",1) else: if (symmetry_string[1] =="c"): k=int(359.99/cn/delta) elif (symmetry_string[1] =="d"): k=int(359.99/2/cn/delta) for i in xrange(k+1): angles.append([i*delta,90.0-j*theta2,90.0]) else : # This is very close to the Saff even_angles routine on the asymmetric unit; # the only parameters used are symmetry and delta # The formulae are given in the Transform Class Paper # The symmetric unit nVec=[]; # x,y,z triples # is defined by three points b,c, v of Fig 2 of the paper # b is (0,0,1) # c is (sin(thetac),0,cos(thetac)) # a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac)) # f is the normalized sum of all 3 # The possible symmetries are in list_syms # The symmetry determines thetac and Omega # The spherical area is Omega - pi/3; # should be equal to 4 *pi/(3*# Faces) # # symmetry ='tet'; delta = 6; scrunch = 0.9 # closeness factor to eliminate oversampling corners #nVec=[] # x,y,z triples piOver = pi/180.0 Count=0 # used to count the number of angles if (symmetryLower[0:3] =="tet"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps elif (symmetryLower[0:3] =="oct"): m=4.0; fudge=0.8 elif (symmetryLower[0:3] =="ico"): m=5.0; fudge=0.95 else: ERROR("allowable symmetries are cn, dn, tet, oct, icos","even_angles",1) n=3.0 OmegaR = 2.0*pi/m; cosOmega= cos(OmegaR) Edges = 2.0*m*n/(2.0*(m+n)-m*n) Faces = 2*Edges/n Area = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega costhetac = cosOmega/(1-cosOmega) deltaRad= delta*pi/180 NumPoints = int(Area/(deltaRad*deltaRad)) fheight = 1/sqrt(3)/ 
(tan(OmegaR/2.0)) z0 = costhetac # initialize loop z = z0 phi = 0 Deltaz = (1-costhetac)/(NumPoints-1) #[1, phi,180.0*acos(z)/pi,0.] anglesLast = [phi,180.0*acos(z)/pi,0.] angles.append(anglesLast) nLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z] nVec = [] nVec.append(nLast) Count +=1 for k in xrange(1,(NumPoints-1)): z=z0 + Deltaz*k # Is it higher than fhat or lower r= sqrt(1-z*z) if (z > fheight): phiRmax= OmegaR/2.0 if (z<= fheight): thetaR = acos(z); cosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega); phiMax = 180.0*( OmegaR - acos(cosStuff))/pi angleJump = fudge* delta/r phi = (phi + angleJump)%(phiMax) anglesNew = [phi,180.0*acos(z)/pi,0.]; nNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z] diffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] diffMin = min(diffangleVec) if (diffMin>angleJump*piOver *scrunch): Count +=1 angles.append(anglesNew) nVec.append(nNew) #[Count, phi,180*acos(z)/pi,0.] anglesLast = anglesNew nLast=nNew angles.append( [0.0, 0.0, 0.0] ) nLast= [ 0., 0. , 1.] nVec.append(nLast) if(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] ) angles.reverse() if(phiEqpsi == "Minus"): for i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0 #print(Count,NumPoints) # look at the distribution # Count =len(angles); piOver= pi/180.0; # phiVec = [ angles[k][0] for k in range(Count)] ; # thetaVec = [ angles[k][1] for k in range(Count)] ; # xVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ] # yVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ] # zVec = [cos(piOver * angles[k][1]) for k in range(Count) ] # pylab.plot(yVec,zVec,'.'); pylab.show() return angles
[ "def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta, psi\n\t\tfor i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);\n\telse: #elif (method == 'S'):\n\t\tDeltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)\n\t\ts = delta*pi/180.0\n\t\tNFactor = 3.6/s\n\t\twedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)\n\t\tNumPoints = int(NFactor*NFactor*wedgeFactor)\n\t\tangles.append([phi1, theta1, 0.0])\n\t\tz1 = cos(theta1*pi/180.0); \tphi=phi1 # initialize loop\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z1 + Deltaz*k/(NumPoints-1)\n\t\t\tr= sqrt(1-z*z)\n\t\t\tphi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))\n\t\t\t#[k, phi,180*acos(z)/pi, 0]\n\t\t\tangles.append([phi, 180*acos(z)/pi, 0.0])\n\t\t#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07\n\tif (phiEQpsi == 'Minus'):\n\t\tfor k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0\n\tif( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )\n\n\treturn angles", "def angles(self, num: int) -> Iterable[float]:\n if num < 2:\n raise ValueError(\"num >= 2\")\n start = self.dxf.start_angle % 360\n stop = self.dxf.end_angle % 360\n if stop <= start:\n stop += 360\n for angle in linspace(start, stop, num=num, endpoint=True):\n yield angle % 360", "def getEulerAngles(self):\r\n \r\n q0, q1, q2, q3 = toValue(self.values)\r\n \r\n try:\r\n phi = atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 ** 2 + q2 ** 2))\r\n st = 2 * (q0 * q2 - q3 * q1)\r\n st = 1 if st > 1 else st #gimbal lock\r\n st = -1 if st < -1 else st\r\n theta = asin(st) \r\n psi = atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 ** 2 + q3 ** 2))\r\n except ValueError:\r\n raise ValueError('Quaternion is invalid', q0, q1, q2, q3)\r\n \r\n return toVector(phi, theta, psi)", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def angles(self):\n return self.cellpar()[3:].copy()", "def getEulerAngles(self, axes='sxyz'):\n\n M = self.getMatrix()\n\n try:\n firstaxis, parity, repetition, frame = AXES_TO_TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n TUPLE_TO_AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = NEXT_AXIS[i+parity]\n k = NEXT_AXIS[i-parity+1]\n\n a = numpy.empty((3, ))\n\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > EPS:\n a[0] = math.atan2( M[i, j], M[i, k])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = math.atan2( M[j, i], -M[k, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > EPS:\n a[0] = math.atan2( M[k, j], M[k, k])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = math.atan2( M[j, i], M[i, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = 0.0\n\n if parity:\n a[0], a[1], a[2] = -a[0], -a[1], -a[2]\n if frame:\n a[0], a[2] = a[2], a[0]\n return a", "def test_angle_form():\n # populate angles\n angles = []\n angle = - math.pi\n for i in range(5):\n angles.append(angle)\n angle += 2 * math.pi / 4\n\n # run function for angles\n for angle in angles:\n x, y = angle_to_xy(angle)\n print(x, y)\n\n return None", "def get_angles(sides):\n return [get_angle(sides[1], sides[2], sides[0]),\n get_angle(sides[2], sides[0], sides[1]),\n get_angle(sides[0], sides[1], sides[2])]", "def _angles(angles):\n interpolated_angles = []\n fraction = 1.0 / INTERPOLATION_FACTOR\n for i in range(len(angles) - 1):\n start_angle = float(angles[i])\n end_angle = float(angles[i + 1])\n if abs(end_angle - start_angle) <= 4:\n for j in range(INTERPOLATION_FACTOR):\n start_factor = (INTERPOLATION_FACTOR - j) * start_angle\n end_factor = j * end_angle\n interpolated_angle = (start_factor + end_factor) * fraction\n interpolated_angles.append(interpolated_angle)\n else:\n for j in range(INTERPOLATION_FACTOR):\n if start_angle < 4:\n start_angle += 8\n if end_angle < 4:\n end_angle += 8\n start_factor = (INTERPOLATION_FACTOR - j) * start_angle\n end_factor = j * end_angle\n interpolated_angle = start_factor + end_factor\n interpolated_angle = (interpolated_angle * fraction) % 8\n interpolated_angles.append(interpolated_angle)\n return interpolated_angles", "def angles( self ):\n\n if self.is_valid():\n a = self.__sideA\n b = self.__sideB\n c = self.__sideC\n\n cos_C = ((a**2) + (b**2) - (c**2)) / (2*a*b)\n angle_C = math.acos(cos_C)\n angle_C = math.degrees(angle_C)\n\n cos_B = ((c**2) + (a**2) - (b**2)) / (2*c*a)\n angle_B = math.acos(cos_B)\n angle_B = math.degrees(angle_B)\n\n angle_A = 180 - (angle_B + angle_C)\n\n return round(angle_A, 1), round(angle_B, 
1), round(angle_C, 1)\n\n else:\n return None, None, None", "def steps_to_angle():\n pass", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def get_internal_angles(self):\n\n angles = []\n\n for elx, elz in zip(self.grid['x'], self.grid['z']):\n el_angles = []\n xy = np.vstack((elx, elz))\n for i in range(0, elx.size):\n i1 = (i - 1) % elx.size\n i2 = (i + 1) % elx.size\n\n a = (xy[:, i] - xy[:, i1])\n b = (xy[:, i2] - xy[:, i])\n # note that nodes are ordered counter-clockwise!\n angle = np.pi - np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1]\n )\n el_angles.append(angle * 180 / np.pi)\n angles.append(el_angles)\n return np.array(angles)", "def angle(self, s: float) -> float:\n pc = np.array([self._yc, self._xc])\n ps = np.array([self._ys, self._xs])\n pe = np.array([self._ye, self._xe])\n\n phi_0 = np.arctan2(ps[0] - pc[0], ps[1] - pc[1]) # The angle in the initial point of the arc\n phi_final = np.arctan2(pe[0] - pc[0], pe[1] - pc[1]) # The angle in the final point of the arc\n\n # Normalization factor so that s can vary between 0 and 1\n if ps[0] == pe[0] and ps[1] == pe[1]:\n factor = 2 * np.pi # We want a full circle\n else:\n if self._direction == 1.0 and phi_final < phi_0:\n factor = 2 * np.pi - (phi_0 - phi_final)\n elif self._direction == -1.0 and phi_final > phi_0:\n factor = 2 * np.pi - (phi_final - phi_0)\n elif self._direction == 1.0 and phi_final > phi_0:\n factor = phi_final - phi_0\n else:\n factor = phi_0 - phi_final\n\n # Wrap the phi_0 angle between -pi and pi so that the curve is well parameterized\n phi_0 = wrapAngle(phi_0)\n\n # Since s is normalized between 0 and 1, then calculate X and Y accordingly\n aux_angle = wrapAngle(phi_0 + self._direction * s * factor)\n\n pd_dot_ = self._direction * np.array([np.cos(aux_angle), -np.sin(aux_angle)])\n psid_ = np.arctan2(pd_dot_[0], pd_dot_[1])\n return psid_", "def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)", "def angles(self):\n self._sort_measurements()\n return self._angles", "def polar_polygon(nfaces,radius, npoints):\n theta=np.linspace(0,2*np.pi,npoints)[:-1]\n n = nfaces\n r= cos( pi/n )/cos((theta%(2*pi/n))-pi/n)\n d = np.cumsum(np.sqrt(((r[1:]-r[:-1])**2)))\n d = [0]+list(d/d.max())\n return zip(radius*r, theta, d)", "def _from_euler_angles(alpha, beta, gamma):\n beta = beta.acos()\n xrot = _rotmat3x3(alpha, axis=2)\n yrot = _rotmat3x3(beta, axis=0)\n zrot = _rotmat3x3(gamma, axis=2)\n return xrot @ yrot @ zrot", "def angles(self):\n penult = self._coordinates[-2]\n last = self._coordinates[-1]\n angles = []\n for c in self._coordinates:\n angle = (math.atan2(penult[0]-last[0], penult[1]-last[1]) -\n math.atan2(c[0]-last[0], c[1]-last[1]))\n angles.append(angle)\n penult, last = last, c\n return sorted(angles)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of Euler angles suitable for projections. method is either 'S' for the Saff algorithm or 'P' for the Penczek '94 algorithm; 'S' assumes phi1 > delta. phiEQpsi: set this to 'Minus' if you want psi = phi.
def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'): from math import pi, sqrt, cos, acos angles = [] if (method == 'P'): temp = Util.even_angles(delta, theta1, theta2, phi1, phi2) # phi, theta, psi for i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]); else: #elif (method == 'S'): Deltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0) s = delta*pi/180.0 NFactor = 3.6/s wedgeFactor = abs(Deltaz*(phi2-phi1)/720.0) NumPoints = int(NFactor*NFactor*wedgeFactor) angles.append([phi1, theta1, 0.0]) z1 = cos(theta1*pi/180.0); phi=phi1 # initialize loop for k in xrange(1,(NumPoints-1)): z=z1 + Deltaz*k/(NumPoints-1) r= sqrt(1-z*z) phi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1)) #[k, phi,180*acos(z)/pi, 0] angles.append([phi, 180*acos(z)/pi, 0.0]) #angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07 if (phiEQpsi == 'Minus'): for k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0 if( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] ) return angles
[ "def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = \"Minus\", symmetry='c1'):\n\n\tfrom math import pi, sqrt, cos, acos, tan, sin\n\tfrom utilities import even_angles_cd\n\tfrom string import lower,split\n\tangles = []\n\tsymmetryLower = symmetry.lower()\n\tsymmetry_string = split(symmetry)[0]\n\tif (symmetry_string[0] == \"c\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif(int(symmetry_string[1:]) > 1):\n\t\t\t\tif( int(symmetry_string[1:])%2 ==0):\n\t\t\t\t\tqt = 360.0/int(symmetry_string[1:])\n\t\t\t\telse:\n\t\t\t\t\tqt = 180.0/int(symmetry_string[1:])\n\t\t\t\tn = len(angles)\n\t\t\t\tfor i in xrange(n):\n\t\t\t\t\tt = n-i-1\n\t\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"d\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif (int(symmetry_string[1:])%2 == 0):\n\t\t\t\tqt = 360.0/2/int(symmetry_string[1:])\n\t\t\telse:\n\t\t\t\tqt = 180.0/2/int(symmetry_string[1:])\n\t\t\tn = len(angles)\n\t\t\tfor i in xrange(n):\n\t\t\t\tt = n-i-1\n\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"s\"):\n\t\n\t#if symetry is \"s\", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2\n\t\t# for helical, theta1 cannot be 0.0\n\t\tif theta1 > 90.0:\n\t\t\tERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)\n\t\tif theta1 == 0.0: theta1 =90.0\n\t\ttheta_number = int((90.0 - theta1)/theta2)\n\t\t#for helical, symmetry = s or scn\n\t\tcn = int(symmetry_string[2:])\n\t\tfor j in xrange(theta_number,-1, -1):\n\n\t\t\tif( j == 0):\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/4/cn/delta)\n\t\t\t\telse:\n\t\t\t\t\tERROR(\"For helical strucutre, we only support scn and sdn symmetry\",\"even_angles\",1)\n\n\t\t\telse:\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\t\t\n\t\t\tfor i in xrange(k+1):\n\t\t\t\t\tangles.append([i*delta,90.0-j*theta2,90.0])\n\n\n\telse : # This is very close to the Saff even_angles routine on the asymmetric unit;\n\t\t# the only parameters used are symmetry and delta\n\t\t# The formulae are given in the Transform Class Paper\n\t\t# The symmetric unit \t\tnVec=[]; # x,y,z triples\n\t\t# is defined by three points b,c, v of Fig 2 of the paper\n\t\t# b is (0,0,1)\n\t\t# c is (sin(thetac),0,cos(thetac))\n\t\t# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))\n\t\t# f is the normalized sum of all 3\n\t\t\n\t\t# The possible symmetries are in list_syms\n\t\t# The symmetry determines thetac and Omega\n\t\t# The spherical area is Omega - pi/3; \n\t\t# should be equal to 4 *pi/(3*# Faces)\n\t\t#\t\t\n\t\t# symmetry ='tet'; delta = 6;\n\n\t\tscrunch = 0.9 # closeness factor to eliminate 
oversampling corners\n\t\t#nVec=[] # x,y,z triples\n\n\t\tpiOver = pi/180.0\n\t\tCount=0 # used to count the number of angles\n\t\t\n\t\tif (symmetryLower[0:3] ==\"tet\"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps\n\t\telif (symmetryLower[0:3] ==\"oct\"): m=4.0; fudge=0.8\n\t\telif (symmetryLower[0:3] ==\"ico\"): m=5.0; fudge=0.95\n\t\telse: ERROR(\"allowable symmetries are cn, dn, tet, oct, icos\",\"even_angles\",1)\n\n\t\tn=3.0\n\t\tOmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)\n\t\tEdges = 2.0*m*n/(2.0*(m+n)-m*n)\n\t\tFaces = 2*Edges/n\n\t\tArea = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega\n\t\tcosthetac = cosOmega/(1-cosOmega)\n\t\tdeltaRad= delta*pi/180\n\t\tNumPoints = int(Area/(deltaRad*deltaRad))\n\t\tfheight = 1/sqrt(3)/ (tan(OmegaR/2.0))\n\n\t\tz0 = costhetac # initialize loop\t\n\t\tz = z0\n\t\tphi = 0\n\t\tDeltaz = (1-costhetac)/(NumPoints-1)\n\n\t\t#[1, phi,180.0*acos(z)/pi,0.]\n\t\tanglesLast = [phi,180.0*acos(z)/pi,0.]\n\t\tangles.append(anglesLast)\n\t\tnLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\tnVec = []\n\t\tnVec.append(nLast)\n\n\t\tCount +=1\n\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z0 + Deltaz*k # Is it higher than fhat or lower\n\t\t\tr= sqrt(1-z*z)\n\t\t\tif (z > fheight): phiRmax= OmegaR/2.0\n\t\t\tif (z<= fheight):\n\t\t\t\tthetaR = acos(z); \n\t\t\t\tcosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);\n\t\t\t\tphiMax = 180.0*( OmegaR - acos(cosStuff))/pi\n\t\t\tangleJump = fudge* delta/r\n\t\t\tphi = (phi + angleJump)%(phiMax)\n\t\t\tanglesNew = [phi,180.0*acos(z)/pi,0.];\n\t\t\tnNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\t\tdiffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] \n\t\t\tdiffMin = min(diffangleVec)\n\t\t\tif (diffMin>angleJump*piOver *scrunch):\n\t\t\t\tCount +=1\n\t\t\t\tangles.append(anglesNew)\n\t\t\t\tnVec.append(nNew)\n\t\t\t\t#[Count, phi,180*acos(z)/pi,0.]\n\t\t\tanglesLast = anglesNew\n\t\t\tnLast=nNew\n\n\t\tangles.append( [0.0, 0.0, 0.0] )\n\t\tnLast= [ 0., 0. 
, 1.]\n\t\tnVec.append(nLast)\n\t\tif(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )\n\t\t\n\t\tangles.reverse()\n\t\tif(phiEqpsi == \"Minus\"):\n\t\t\tfor i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0\n\t\t#print(Count,NumPoints)\n\t\t\n#\t\tlook at the distribution\n#\t\tCount =len(angles); piOver= pi/180.0;\n#\t\tphiVec = [ angles[k][0] for k in range(Count)] ;\n#\t\tthetaVec = [ angles[k][1] for k in range(Count)] ;\n#\t\txVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tyVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tzVec = [cos(piOver * angles[k][1]) for k in range(Count) ]\n#\t\tpylab.plot(yVec,zVec,'.'); pylab.show()\n\n\n\treturn angles", "def getEulerAngles(self):\r\n \r\n q0, q1, q2, q3 = toValue(self.values)\r\n \r\n try:\r\n phi = atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 ** 2 + q2 ** 2))\r\n st = 2 * (q0 * q2 - q3 * q1)\r\n st = 1 if st > 1 else st #gimbal lock\r\n st = -1 if st < -1 else st\r\n theta = asin(st) \r\n psi = atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 ** 2 + q3 ** 2))\r\n except ValueError:\r\n raise ValueError('Quaternion is invalid', q0, q1, q2, q3)\r\n \r\n return toVector(phi, theta, psi)", "def euler_rotate(X, phi, theta, psi):\n\n A = numpy.array([\n [numpy.cos(psi) * numpy.cos(phi) - numpy.cos(theta) * numpy.sin(phi) * numpy.sin(psi),\n -numpy.sin(psi) * numpy.cos(phi) - numpy.cos(theta) * numpy.sin(phi) * numpy.cos(psi),\n numpy.sin(theta) * numpy.sin(phi)\n ],\n [numpy.cos(psi) * numpy.sin(phi) + numpy.cos(theta) * numpy.cos(phi) * numpy.sin(psi),\n -numpy.sin(psi) * numpy.sin(phi) + numpy.cos(theta) * numpy.cos(phi) * numpy.cos(psi),\n -numpy.sin(theta) * numpy.cos(phi)\n ],\n [numpy.sin(theta) * numpy.sin(psi), numpy.sin(theta) * numpy.cos(psi), numpy.cos(theta)]\n ])\n return numpy.dot(A, numpy.dot(X, scipy.linalg.inv(A)))", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q", "def angles(self):\n return self.cellpar()[3:].copy()", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)", "def refine_angles(self, method='nelder', **opts):\n self.set_idx()\n from lmfit import fit_report, minimize\n p0 = self.define_parameters(**opts)\n self.result = minimize(self.angle_residuals, p0, method=method)\n self.fit_report = fit_report(self.result)\n if self.result.success:\n self.get_parameters(self.result.params)", "def phi(self): \n return [a.phi for a in self]", "def euler_from_quaternion(quaternion):\n x=quaternion[\"im\"][0]\n y=quaternion[\"im\"][1]\n z=quaternion[\"im\"][2]\n w=quaternion[\"re\"]\n\n t0 = 2*(w*z + x*y)\n t1 = 1 - 2 * (x*x + y*y)\n t1 = w*w + x*x - y*y - z*z \n yaw = math.atan2(t0, t1)\n \n t2 = 2*(w*y - z*x)\n pitch = math.asin(t2)\n \n t3 = 2*(w*x + y*z)\n t4 = w*w - x*x - y*y + z*z \n roll = math.atan2(t3, t4)\n \n return yaw, pitch, roll # in radians", "def getEulerAngles(self, axes='sxyz'):\n\n M = self.getMatrix()\n\n try:\n firstaxis, parity, repetition, frame = AXES_TO_TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n TUPLE_TO_AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = NEXT_AXIS[i+parity]\n k = NEXT_AXIS[i-parity+1]\n\n a = numpy.empty((3, ))\n\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > EPS:\n a[0] = math.atan2( M[i, j], M[i, k])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = math.atan2( M[j, i], -M[k, i])\n 
else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > EPS:\n a[0] = math.atan2( M[k, j], M[k, k])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = math.atan2( M[j, i], M[i, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = 0.0\n\n if parity:\n a[0], a[1], a[2] = -a[0], -a[1], -a[2]\n if frame:\n a[0], a[2] = a[2], a[0]\n return a", "def steps_to_angle():\n pass", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # STOP()\n return result_h, fixed_omega", "def get_azimuths(self):\n\n # create empty lists\n azimuths = [0.] * 24\n rotation = [0.] 
* 12\n\n # read existing azimuth values\n for j in range(0, 24, 2):\n a = self.get_azimuth(j // 2)\n azimuths[j] = a\n\n #: rotation angle\n d = 0\n\n # DualReturn active?\n if self.is_dual_return():\n for j in range(0, 19, 4):\n d2 = azimuths[j + 4] - azimuths[j]\n if d2 < 0:\n d2 += 360.0\n d = d2 / 2.0\n a = azimuths[j] + d\n azimuths[j + 1] = a\n azimuths[j + 3] = a\n rotation[j // 2] = d\n rotation[j // 2 + 1] = d\n\n rotation[10] = d\n azimuths[21] = azimuths[20] + d\n\n # Strongest / Last-Return\n else:\n for j in range(0, 22, 2):\n d2 = azimuths[j + 2] - azimuths[j]\n if d2 < 0:\n d2 += 360.0\n d = d2 / 2.0\n a = azimuths[j] + d\n azimuths[j + 1] = a\n rotation[j // 2] = d\n\n # last rotation angle from angle before\n rotation[11] = d\n azimuths[23] = azimuths[22] + d\n\n # >360 -> -360\n for j in range(24):\n if azimuths[j] > 360.0:\n azimuths[j] -= 360.0\n\n # print (azimuths)\n # print (rotation)\n return azimuths, rotation", "def TranslateEllipse(self):\n \n phi = eval(self.directionLineEdit.text())\n theta = eval(self.eangleLineEdit.text())\n angles = getAnglesFromEllipse(phi, theta)\n\n #Convert these to globa variables.\n return angles['psi'], angles['alpha'], angles['Jones']", "def adjust_angles(psi):\n pi = np.pi\n while (psi > pi):\n psi -= 2*pi\n while (psi <= -pi):\n psi += 2*pi\n return psi", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def test_angle_form():\n # populate angles\n angles = []\n angle = - math.pi\n for i in range(5):\n angles.append(angle)\n angle += 2 * math.pi / 4\n\n # run function for angles\n for angle in angles:\n x, y = angle_to_xy(angle)\n print(x, y)\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the in_plane angle from two images and output the cross correlation value. The function won't destroy the two input images. This is the angle that rotates the first image, ima, into the second image, ref. The sense of the rotation is clockwise. center=1 means the image is first centered, then the rotation angle is found.
def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1): from alignment import Numrinit, ringwe, Applyws, ormq from filter import fshift first_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp) nx=ima.get_xsize() if(last_ring == -1): last_ring=int(nx/2)-2 cnx = int(nx/2)+1 cny = cnx mode = "F" #precalculate rings numr = Numrinit(first_ring, last_ring, rstep, mode) wr = ringwe(numr, mode) if(center==1): cs = [0.0]*2 # additio cs = ref.phase_cog() ref1 = fshift(ref, -cs[0], -cs[1]) cimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode) cs = ima.phase_cog() ima1 = fshift(ima, -cs[0], -cs[1]) else: ima1=ima.copy() cimage=Util.Polar2Dm(ref, cnx, cny, numr, mode) Util.Frngs(cimage, numr) Applyws(cimage, numr, wr) [angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny) return angt,sxst, syst, mirrort, peakt
[ "def calculate_translation(reference_im:np.ndarray, \n target_im:np.ndarray,\n ref_to_tar_rotation:np.ndarray=None,\n use_autocorr:bool=True,\n alignment_kwargs:dict={},\n verbose:bool=True,\n ):\n from math import pi\n import cv2\n ## quality check\n # images\n if np.shape(reference_im) != np.shape(target_im):\n raise IndexError(f\"two images should be of the same shape\")\n # rotation matrix\n if ref_to_tar_rotation is None:\n ref_to_tar_rotation = np.diag([1,1])\n elif np.shape(ref_to_tar_rotation) != tuple([2,2]):\n raise IndexError(f\"wrong shape for rotation matrix, should be 2x2. \")\n # get dimensions\n _dz,_dx,_dy = np.shape(reference_im)\n # calculate angle\n if verbose:\n print(f\"-- start calculating drift with rotation between images\")\n _rotation_angle = np.arcsin(ref_to_tar_rotation[0,1])/pi*180\n _temp_new_rotation_M = cv2.getRotationMatrix2D((_dx/2, _dy/2), _rotation_angle, 1) # temporary rotation angle\n # rotate image\n if _rotation_angle != 0:\n _rot_target_im = np.array([cv2.warpAffine(_lyr, _temp_new_rotation_M, \n _lyr.shape, borderMode=cv2.BORDER_DEFAULT) \n for _lyr in target_im], dtype=reference_im.dtype)\n else:\n _rot_target_im = target_im\n # calculate drift \n _drift, _drift_flag = align_image(\n _rot_target_im,\n reference_im,\n precision_fold=10,\n use_autocorr=use_autocorr,\n verbose=verbose,\n #detailed_verbose=verbose,\n **alignment_kwargs,)\n\n if verbose:\n print(f\"--- drift: {np.round(_drift,2)} pixels\")\n \n return _rot_target_im, ref_to_tar_rotation, _drift", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def test_metric_angle_of_intersecting_planes(self):\n x = Symbol('x')\n y = Symbol('y')\n p1 = self.plane(1, 0, 0, -x)\n p2 = self.plane(0, 1, 0, -y)\n\n self.assertEqual(asin(self.norm(p1 ^ p2)), pi / 2)", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = 
np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def angle_from_centroids(pred_0, pred_1, plane_0, plane_1):\n if np.any(pred_0 != 0) and np.any(pred_1 != 0):\n true_coords = np.argwhere(pred_0)\n xyz_0 = plane_0[:, true_coords[:, 0], true_coords[:, 1]]\n centroid_0 = xyz_0.mean(axis=1)\n\n true_coords = np.argwhere(pred_1)\n xyz_1 = plane_1[:, true_coords[:, 0], true_coords[:, 1]]\n centroid_1 = xyz_1.mean(axis=1)\n\n diff = centroid_1 - centroid_0 # 2 -> z, 1 -> y, 0 -> x\n diff[2] = - diff[2]\n\n z_angle = np.degrees(np.arctan(diff[2] / diff[0])) # z / x\n x_angle = np.degrees(np.arctan(diff[1] / diff[0])) # y / x\n return z_angle, x_angle\n else:\n return 0, 0 # cant compute centroids from black masks", "def collisionAngle(obj1, obj2):\n vec1 = obj1.vec\n vec2 = obj2.vec\n n1 = np.linalg.norm(vec1)\n n2 = np.linalg.norm(vec2)\n return abs(np.cross(vec1,vec2)/(n1*n2))", "def rotationAngle(A,B,m=None,angle_spec=Deg):\n A = asarray(A).reshape(-1,3)\n B = asarray(B).reshape(-1,3)\n if m is None:\n A = normalize(A)\n B = normalize(B)\n n = cross(A,B) # vectors perpendicular to A and B\n t = length(n) == 0.\n if t.any(): # some vectors A and B are parallel\n n[t] = anyPerpendicularVector(A[t])\n n = normalize(n)\n c = dotpr(A,B)\n angle = arccosd(c.clip(min=-1.,max=1.),angle_spec)\n return angle,n\n else:\n m = asarray(m).reshape(-1,3)\n # project vectors on plane\n A = projectionVOP(A,m)\n B = projectionVOP(B,m)\n angle,n = rotationAngle(A,B,angle_spec=angle_spec)\n # check sign of the angles\n m = normalize(m)\n inv = isClose(dotpr(n,m),[-1.])\n angle[inv] *= -1.\n return angle", "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def angle_through_center(p1, p2):\r\n phi1 = numpy.deg2rad( p1[0] )\r\n phi2 = numpy.deg2rad( p2[0] )\r\n dphi_2 = 0.5 * ( phi2 - phi1 )\r\n dlambda_2 = 0.5 * numpy.deg2rad( p2[1] - p1[1] )\r\n a = numpy.sin( dphi_2 )**2 + numpy.cos( phi1 ) * numpy.cos( phi2 ) * ( numpy.sin( dlambda_2 )**2 )\r\n c = 2. * numpy.arctan2( numpy.sqrt(a), numpy.sqrt( 1. - a ) )\r\n return c", "def angle(a1, a2, a3):\n r21 = (a1.position - a2.position).defunits_value() # remove units immediately to improve speed\n r23 = (a3.position - a2.position).defunits_value()\n return mathutils.alignment_rotation(r21, r23)[0]", "def correlation(im1, im2):\n #return np.sum( (im1-np.mean(im1)) * (im2-np.mean(im2)) / ( np.std(im1) * np.std(im2) ) ) / np.prod(np.shape(im1))\n return np.sum((im1) * (im2)) / np.sqrt(np.sum((im1)**2) * np.sum((im2)**2))", "def AngleCorr(angle,border):\n if angle > border:\n return angle-2*border\n elif angle < -border:\n return angle+2*border\n else:\n return angle", "def slip_angle(self):\n if np.count_nonzero(self.mol_a.geom.perp_ax) == 0:\n self.mol_a.calc_axes()\n if np.count_nonzero(self.mol_b.geom.perp_ax) == 0:\n self.mol_b.calc_axes()\n # vector gonig from A to B\n cen_cen = self.mol_b.centroid() - self.mol_a.centroid()\n # test angle with positive cen_cen and negative cen_cen. 
keep smallest\n slip_angle_a_pos = ao.vec_angle(self.mol_a.geom.perp_ax, cen_cen)\n slip_angle_a_neg = 180 - slip_angle_a_pos\n slip_angle_b_pos = ao.vec_angle(self.mol_b.geom.perp_ax, cen_cen)\n slip_angle_b_neg = 180 - slip_angle_b_pos\n slip_angle_a = min((slip_angle_a_pos,slip_angle_a_neg))\n slip_angle_b = min((slip_angle_b_pos,slip_angle_b_neg))\n\n final_slip = min(slip_angle_a, slip_angle_b)\n return final_slip", "def rotated_intersections(self):\n slices = self.find_active_intersections()\n rotation_angle = slices[0]\n slices = slices - rotation_angle\n\n return rotation_angle, slices + (slices < 0)*2.*np.pi", "def _get_rotation(self, p1, p2):\n delta = p2 - p1\n l = np.linalg.norm(delta)\n return np.array([[delta[1] / l, -delta[0] / l], [delta[0] / l, delta[1] / l]])", "def detect_angle(self,img):\n angle = self.angleModel(img)\n if angle==90:\n im = Image.fromarray(img).transpose(Image.ROTATE_90)\n img = np.array(im)\n elif angle==180:\n im = Image.fromarray(img).transpose(Image.ROTATE_180)\n img = np.array(im)\n elif angle==270:\n im = Image.fromarray(img).transpose(Image.ROTATE_270)\n img = np.array(im)\n \n return img,angle", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def angleOfTwoPoints(self, corner, start, end):\n \n ##find the length between object and other's postion\n #cornerToEnd = self.distance(corner, end)\n ##multiply that distance to the direction of the object\n #scaledCTE = self.scale(start, cornerToEnd)\n ##combined vector of postion and direction to properly \n ## place the direction on the cartesian plane\n #addCTE = self.add(corner, scaledCTE)\n ##calc angle between pos, dir, other\n #angle = self.angle(corner, addCTE, end)\n \n ##-------\n \n #combined vector of postion and direction to properly \n # place the direction on the cartesian plane\n addCTE = self.add(corner, start)\n #calc angle between pos, dir, other\n angle = self.angle(corner, addCTE, end)\n \n #print '### angle between direction and mouse ###', angle\n \n return round(angle)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an image created from a text file. The first line of the file should contain "nx ny nz" (separated by whitespace). All subsequent lines contain "ix iy iz val", where ix, iy, and iz are the integer x, y, and z coordinates of the point and val is the floating point value of that point. All points not explicitly listed are set to zero.
def get_textimage(fname):
    from string import atoi, atof
    infile = open(fname)
    lines = infile.readlines()
    infile.close()
    data = lines[0].split()
    nx = atoi(data[0])
    ny = atoi(data[1])
    nz = atoi(data[2])
    e = EMData()
    e.set_size(nx, ny, nz)
    e.to_zero()
    for line in lines[1:]:
        data = line.split()
        ix = atoi(data[0])
        iy = atoi(data[1])
        iz = atoi(data[2])
        val = atof(data[3])
        e[ix, iy, iz] = val
    return e
[ "def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z", "def read_2d_analysis_data(f):\n\n if os.path.exists(f):\n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n else:\n x = 0.\n y = 0.\n\n return x, y", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def read_data(file: str):\n points = []\n data = np.genfromtxt(file, delimiter=' ')\n np.random.shuffle(data)\n for line in data:\n points.append(Point(x=line[0], y=line[1], label=line[2]))\n return points", "def create_from_file(cls, file_name: str) -> \"TensorImage\":\n image_data = image_utils.decode_image_from_file(file_name)\n return cls(image_data, is_from_numpy_array=False)", "def from_file(file):\n return Vector(float(x) for x in file.readline().split())", "def load_from_file(cls, filename):\n # HDR Format Specifications: http://paulbourke.net/dataformats/pic/\n #\n # Typical header:\n # #?RADIANCE\n # SOFTWARE=gegl 0.4.12\n # FORMAT=32-bit_rle_rgbe\n #\n # -Y 1024 +X 2048\n # Data\n hdr = HDR()\n data = []\n header = False\n with open(filename, \"rb\") as f:\n while True:\n line = ''\n c = f.read(1).decode('ascii')\n while c != '\\n':\n line += c\n c = f.read(1).decode('ascii')\n\n # Case: Empty lines\n if line == '' or (len(line) == 1 and ord(line[0]) == 10):\n continue\n # Case: header\n m = re.match(r'^#\\?RADIANCE$', line)\n if m:\n header = True\n continue\n # Case: Size\n m = re.match(r'^(.)(.)\\s(\\d+)\\s(.)(.)\\s(\\d+)$', line)\n if m:\n hdr.rotated = m.group(2) == 'X'\n hdr.xFlipped = m.group(1 if hdr.rotated else 4) == '-'\n hdr.yFlipped = m.group(4 if hdr.rotated else 1) == '+'\n hdr.width = int(m.group(6))\n hdr.height = int(m.group(3))\n break\n # Case: ignored header entries\n if line.startswith('FORMAT=') or \\\n line.startswith('EXPOSURE=') or \\\n line.startswith('COLORCORR=') or \\\n line.startswith('SOFTWARE=') or \\\n line.startswith('PIXASPECT=') or \\\n line.startswith('VIEW=') or \\\n line.startswith('PRIMARIES=') or \\\n line.startswith('GAMMA=') or \\\n line.startswith('# '):\n continue\n break\n # Case: Data\n data = f.read()\n\n assert header, 'Invalid header.'\n assert 4 * hdr.width * hdr.height == len(data) and len(data) > 0, \\\n 'Invalid dimensions (expected dimension: 4x%dx%d, get %d floats)' % (hdr.width, hdr.height, len(data))\n assert not (hdr.rotated or hdr.xFlipped or hdr.yFlipped), 'Flip or rotation flags are not supported.'\n\n # Convert data to floats\n hdr.data = [0.0] * (3 * hdr.width * hdr.height)\n for i in range(hdr.width * hdr.height):\n r = float(data[4 * i])\n g = float(data[4 * i + 1])\n b = float(data[4 * i + 2])\n e = pow(2.0, float(data[4 * i + 3]) - 128.0 + 8.0)\n hdr.data[3 * i] = pow(r * e, 1.0 / GAMMA) / 255.0\n hdr.data[3 * i + 1] = pow(g * e, 1.0 / GAMMA) / 255.0\n hdr.data[3 * i + 2] = pow(b * e, 1.0 / GAMMA) / 255.0\n\n return hdr", "def create_image_from_fits_file(fname):\n 
hdulist = pf.open(fname)\n return create_image_from_hdulist(hdulist)", "def parse(self, filename):\n self._check_filename_type(filename)\n self._check_extension(filename)\n\n self.infile = filename\n\n i = 0\n n_points = 0\n with open(self.infile, 'r') as input_file:\n for line in input_file:\n n_points += 1\n mesh_points = np.zeros(shape=(n_points, 3))\n\n with open(self.infile, 'r') as input_file:\n \n i = 0\n for line in input_file:\n numbers = line.split() #[n1 p x y z] -> [x y z]\n del numbers[0:2] \n\n j = 0\n for number in numbers:\n\n mesh_points[i][j] = float(number)\n j += 1\n i += 1\n\n return mesh_points", "def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y", "def _parse_txt(path, n_channels):\n f = open(path)\n lines = f.readlines()\n f.close()\n\n geom = np.zeros((0, 2))\n\n for i, line in zip(range(n_channels), lines):\n line = line.replace('\\r', '')\n line = line.replace('\\n', '')\n row = line.split(' ')\n geom = np.vstack((geom, row[:2])).astype('float')\n\n return geom", "def load_poses_from_txt(self, file_name):\n f = open(file_name, 'r')\n s = f.readlines()\n f.close()\n poses = {}\n for cnt, line in enumerate(s):\n P = np.eye(4)\n line_split = [float(i) for i in line.split(\" \") if i!=\"\"]\n withIdx = len(line_split) == 13\n for row in range(3):\n for col in range(4):\n P[row, col] = line_split[row*4 + col + withIdx]\n if withIdx:\n frame_idx = line_split[0]\n else:\n frame_idx = cnt\n poses[frame_idx] = P\n return poses", "def read_file ( filename ):\n # lecture de l'en-tête\n infile = open ( filename, \"r\" ) \n nb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\n\n # creation de la structure de données pour sauver les images :\n # c'est un tableau de listes (1 par classe)\n data = np.empty ( 10, dtype=object ) \n filler = np.frompyfunc(lambda x: list(), 1, 1)\n filler( data, data )\n\n # lecture des images du fichier et tri, classe par classe\n for ligne in infile:\n champs = ligne.split ()\n if len ( champs ) == nb_features + 1:\n classe = int ( champs.pop ( 0 ) )\n data[classe].append ( list ( map ( lambda x: float(x), champs ) ) )\n infile.close ()\n\n # transformation des list en array\n output = np.empty ( 10, dtype=object )\n filler2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\n filler2 ( data, output )\n\n return output", "def read_file ( filename ):\r\n\t# lecture de l'en-tete\r\n\tinfile = open ( filename, \"r\" ) \r\n\tnb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\r\n\r\n\t# creation de la structure de donnees pour sauver les images :\r\n\t# c'est un tableau de listes (1 par classe)\r\n\tdata = np.empty ( 10, dtype=object ) \r\n\tfiller = np.frompyfunc(lambda x: list(), 1, 1)\r\n\tfiller( data, data )\r\n\r\n\t# lecture des images du fichier et tri, classe par classe\r\n\tfor ligne in infile:\r\n\t\tchamps = ligne.split ()\r\n\t\tif len ( champs ) == nb_features + 1:\r\n\t\t\tclasse = int ( champs.pop ( 0 ) )\r\n\t\t\tdata[classe].append ( map ( lambda x: float(x), champs ) ) \r\n\tinfile.close ()\r\n\r\n\t# transformation des list en array\r\n\toutput = np.empty ( 10, dtype=object )\r\n\tfiller2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\r\n\tfiller2 ( data, output )\r\n\r\n\treturn output", "def open_xyz(filename):\n xyz_file = numpy.genfromtxt(fname=filename, skip_header = 2, dtype = 'unicode')\n symbols = xyz_file[:,0]\n coordinates = xyz_file[:,1:]\n coordinates = coordinates.astype(numpy.float)\n\n return 
symbols, coordinates", "def read_file ( filename ):\n # lecture de l'en-tête\n infile = open ( filename, \"r\" )\n nb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\n\n # creation de la structure de données pour sauver les images :\n # c'est un tableau de listes (1 par classe)\n data = np.empty ( 10, dtype=object )\n filler = np.frompyfunc(lambda x: list(), 1, 1)\n filler( data, data )\n\n # lecture des images du fichier et tri, classe par classe\n for ligne in infile:\n champs = ligne.split ()\n if len ( champs ) == nb_features + 1:\n classe = int ( champs.pop ( 0 ) )\n data[classe].append ( list ( map ( lambda x: float(x), champs ) ) )\n infile.close ()\n\n # transformation des list en array\n output = np.empty ( 10, dtype=object )\n filler2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\n filler2 ( data, output )\n\n return output", "def import_from_mdata_ascii(self, filename):\n \n f = open(filename, 'r') \n data = f.read()\n f.close()\n mylines = data.splitlines()\n #get nps associated with mdata\n self.nps = np.longlong(mylines[0].split()[5])\n \n \n #find the nx,ny,nz line\n p = re.compile('^f .*')\n j=0\n for i in np.arange(len(mylines)):\n if(p.match(mylines[i])):\n j=i\n break\n \n q = mylines[j].split()\n nxyz = np.longlong(q[1])\n nx = np.long(q[3])\n ny = np.long(q[4])\n nz = np.long(q[5])\n \n #allocate your arrays\n self.tally_values= np.zeros([nxyz])\n self.unc_values=np.zeros([nxyz])\n self.xb = np.zeros([nx+1])\n self.yb = np.zeros([ny+1])\n self.zb = np.zeros([nz+1])\n \n #read in xbounds\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (nx+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.xb = np.array(temp.split(), dtype=np.double)\n print(\"min/max xb: {0},{1}\".format(min(self.xb),max(self.xb)))\n #read in ybounds\n j=i\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (ny+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.yb = np.array(temp.split(), dtype=np.double)\n print(\"min/max yb: {0},{1}\".format(min(self.yb),max(self.yb)))\n #read in zbounds\n j=i\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (nz+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.zb = np.array(temp.split(), dtype=np.double)\n print(\"min/max zb: {0},{1}\".format(min(self.zb),max(self.zb)))\n \n #advance to tally values\n p = re.compile('^vals.*')\n j=0\n for i in np.arange(len(mylines)):\n if(p.match(mylines[i])):\n j=i\n break\n #read everything into an array\n tempArr = np.zeros([2*nxyz])\n si=0\n fi=0\n for i in np.arange((j+1),len(mylines)):\n temp = np.asarray(mylines[i].split(), dtype=np.double)\n fi = si + len(temp)\n tempArr[si:fi] = temp\n si = fi\n \n #separate absobred dose and uncertainty\n self.unc_values = tempArr[1::2].reshape([nx,ny,nz],order='F')\n self.tally_values = tempArr[0::2].reshape([nx,ny,nz], order='F')\n #xc = 0.5*self.xb[0:-1] + 0.5*self.xb[1:]\n #e= np.reshape(np.repeat(xc,nz),[nx,nz])\n #e1=np.reshape(np.repeat(zc,nx), [nx,nz], order='F')", "def read_txt_file(self, fname):\n with open(fname, 'r') as f:\n text = f.read()\n text = text.replace('\\r\\n', ' ') # remove line breaks\n numbers = numpy.fromstring(text, dtype='uint8') - 31\n numbers = numbers * (numbers <= 128 - 31) # set UNK to 0\n numbers = numbers * (numbers >= 32 - 31) # set UNK to 0\n assert(self.data_dim == 96), \"one-hot ASCII required 96 
dims\"\n array = numpy.zeros((self.data_dim, numbers.shape[0]),\n dtype='uint8')\n for i in range(numbers.shape[0]):\n array[numbers[i], i] = 1\n\n return array", "def read(self, filename):\n \n # https://stackoverflow.com/questions/3277503/how-to-read-a-file-line-by-line-into-a-list#3277516\n self.data = [line.rstrip('\\n') for line in open(filename)]\n size = self.data[0].split(\" \")\n rows = int(size[0])\n columns = int(size[1])\n # https://stackoverflow.com/questions/2397141/how-to-initialize-a-two-dimensional-array-in-python#2397192\n self.canvas = [[\".\" for y in range(columns+1)] for x in range(rows+1)]\n # https://stackoverflow.com/questions/10713004/find-length-of-2d-array-python#10713016\n self.y = len(self.canvas)-1\n self.x = len(self.canvas[0])-1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of available symmetries
def list_syms():
    SymStringVec = []
    SymStringVec.append("CSYM")
    SymStringVec.append("DSYM")
    SymStringVec.append("TET_SYM")
    SymStringVec.append("OCT_SYM")
    SymStringVec.append("ICOS_SYM")
    SymStringVec.append("ISYM")
    return SymStringVec
[ "def available_symbologies():\n consts = [d[8:] for d in dir(zint) if d.startswith('BARCODE_')]\n\n return [d for d in consts if d not in IGNORE_ZINT_CONSTS]", "async def _load_supported_symbols() -> List[Symbol]:\n return []", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def symtable(self):\n return self.interpreter.symtable", "def look_for_symmetries(self):\n \n for vertex in self.model.all_vertices:\n for i, part1 in enumerate(vertex.particles):\n for j in range(i-1,-1,-1):\n part2 = vertex.particles[j]\n if part1.pdg_code == part2.pdg_code and part1.color == 1:\n if part1.spin == 2 and (i % 2 != j % 2 ):\n continue \n for lorentz in vertex.lorentz:\n if self.symmetries.has_key(lorentz.name):\n if self.symmetries[lorentz.name].has_key(i+1):\n self.symmetries[lorentz.name][i+1] = max(self.symmetries[lorentz.name][i+1], j+1)\n else:\n self.symmetries[lorentz.name][i+1] = j+1\n else:\n self.symmetries[lorentz.name] = {i+1:j+1}\n break", "def get_symbols():\n\n # Dictionary of symbols and their arity\n \"\"\" arities = {\"nLeftClick\": 0,\n \"nTypeInto\": 0,\n \"LeftClick\": 0,\n \"TypeInto\": 0,\n \"pickAnyUnexecuted\": 1,\n \"pickAny\": 1,\n \"LT\": 2,\n \"EQ\": 2\n } \"\"\"\n\t# begin by urueda\n arities = {\"RND\": 0,\n\t\t\t \"nActions\": 0,\n\t\t\t \"nActionsLeftClick\": 0,\n \"nActionsTypeInto\": 0,\n\t\t\t \"nExecutedLeftClick\": 0,\n\t\t\t \"nExecutedTypeInto\": 0,\n \"tLeftClick\": 0,\n \"tTypeInto\": 0,\n\t\t\t \"tAny\": 0,\n\t\t\t \"tPreviousAction\": 0,\n\t\t\t \"NULL\": 0,\n\t\t\t \"pickLeastExecuted\": 2,\n\t\t\t \"pickMostExecuted\": 2,\n\t\t\t \"pick\": 2,\n \"pickAnyUnexecuted\": 1,\n \"pickDifferentFrom\": 1,\n \"pickSameAs\": 1,\n \"LT\": 2,\n\t\t\t \"LE\": 2,\n \"EQ\": 2,\n\t\t\t \"AND\": 2,\n\t\t\t \"OR\": 2,\n\t\t\t \"NOT\": 1\n }\n terminalsNum = [\"RND\",\n\t\t\t\t \"nActions\",\n\t\t\t\t \"nActionsLeftClick\",\n\t\t\t\t \"nActionsTypeInto\",\n\t\t\t\t \"nExecutedLeftClick\",\n\t\t\t\t \"nExecutedTypeInto\"\n\t]\n terminalsType = [\"tLeftClick\",\n \"tTypeInto\",\n \"tAny\",\n \"tPreviousAction\"\n ]\n logicFunctionsNum2 = [\"LT\", \"LE\", \"EQ\"]\n logicFunctionsLog2 = [\"AND\", \"OR\"]\n logicFunctionsLog1 = [\"NOT\"]\n logicFunctionsAll = logicFunctionsNum2 + logicFunctionsLog2 + logicFunctionsLog1\n functions0 = [\"NULL\"] # ()\n functions2 = [\"pickLeastExecuted\", \"pickMostExecuted\", \"pick\"] # (type,percent) # 0.0 .. 
1.0\t\n functions1 = [\"pickAnyUnexecuted\", \"pickDifferentFrom\", \"pickSameAs\"] # (type)\n functionsAll = functions0 + functions2 + functions1\n\t# end by urueda\n \"\"\" # List of terminal symbols\n terminals = []\n #List of types\n types = []\n # List of function symbols\n functions = []\n # List of logic operator\n logicFunctions = [] \"\"\"\n\n \"\"\" # Append symbols to terminals or functions by looping over the\n # arities items\n for key, value in arities.items():\n # A symbol with arity 0 is a terminal\n if value == 0:\n # Append the symbols to the terminals list\n if key == \"nLeftClick\" or key == \"nTypeInto\":\n terminals.append(key)\n else:\n types.append(key)\n else:\n # Append the symbols to the functions list\n if key == \"LT\" or key == \"EQ\":\n logicFunctions.append(key)\n else:\n functions.append(key) \"\"\"\n\t\t\t\t\n # begin by urueda\n print(\"\\nEA symbols:\\n\")\n print(\"\\tArities: \" + str(arities) + \"\\n\")\n print(\"\\tTerminalsNum: \" + str(terminalsNum) + \"\\n\")\n print(\"\\tTerminalsType: \" + str(terminalsType) + \"\\n\")\n print(\"\\tLogicFunctionsNum2: \" + str(logicFunctionsNum2) + \"\\n\")\n print(\"\\tLogicFunctionsLog2: \" + str(logicFunctionsLog2) + \"\\n\")\n print(\"\\tLogicFunctionsLog1: \" + str(logicFunctionsLog1) + \"\\n\")\n print(\"\\tLogicFunctionsAll: \" + str(logicFunctionsAll) + \"\\n\")\n print(\"\\tFunctions0: \" + str(functions0) + \"\\n\")\n print(\"\\tFunctions2: \" + str(functions2) + \"\\n\")\n print(\"\\tFunctions1: \" + str(functions1) + \"\\n\")\n print(\"\\tFunctionsAll: \" + str(functionsAll) + \"\\n\")\n # end by urueda\n\n return {\"arities\": arities, \"terminalsNum\": terminalsNum, \"terminalsType\": terminalsType, \"logicFunctionsNum2\":logicFunctionsNum2, \"logicFunctionsLog2\":logicFunctionsLog2, \"logicFunctionsLog1\":logicFunctionsLog1, \"logicFunctionsAll\":logicFunctionsAll, \"functions0\":functions0, \"functions2\": functions2, \"functions1\": functions1, \"functionsAll\": functionsAll}", "def z2_symmetries(self) -> \"Z2Symmetries\":\n return self._z2_symmetries", "def itersymbols(self):\n for syms in self._symbols.itervalues():\n for sym in syms:\n yield sym", "def get_symbol(self):\n return []", "def get_symbols_list(self):\n return self.symbols_list", "def sym_gen():\n for sym in vocab_list:\n if sym not in text_encoder.RESERVED_TOKENS:\n yield sym", "def get_symbol_map():\n package = {}\n all_instruments = requests.get(\n \"http://egchallenge.tech/instruments\").json()\n\n for instrument in all_instruments:\n package[instrument[\"id\"]] = instrument[\"symbol\"]\n\n return package", "def add_dysymtab(self, symbols=[]):\n for header in self.__mm.headers:\n for cmd in header.commands:\n load_cmd = cmd[0]\n cmd_info = cmd[1]\n try:\n if load_cmd.get_cmd_name() == 'LC_DYSYMTAB':\n dysymtab = []\n for i in range(18):\n dysymtab.append(\"????????\")\n\n cmd_bytes = struct.pack(self.__byte_order(header) + 'I', load_cmd.cmd).encode('hex')\n cmd_size = struct.pack(self.__byte_order(header) + 'I', load_cmd.cmdsize).encode('hex')\n if 'ilocalsym' in symbols: dysymtab[0] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['ilocalsym']).encode('hex')\n if 'nlocalsym' in symbols: dysymtab[1] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nlocalsym']).encode('hex')\n if 'iextdefsym' in symbols: dysymtab[2] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['iextdefsym']).encode('hex')\n if 'nextdefsym' in symbols: dysymtab[3] = struct.pack(self.__byte_order(header) + 
'I', cmd_info.describe()['nextdefsym']).encode('hex')\n if 'iundefsym' in symbols: dysymtab[4] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['iundefsym']).encode('hex')\n if 'nundefsym' in symbols: dysymtab[5] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nundefsym']).encode('hex')\n if 'tocoff' in symbols: dysymtab[6] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['tocoff']).encode('hex')\n if 'ntoc' in symbols: dysymtab[7] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['ntoc']).encode('hex')\n if 'modtaboff' in symbols: dysymtab[8] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['modtaboff']).encode('hex')\n if 'nmodtab' in symbols: dysymtab[9] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nmodtab']).encode('hex')\n if 'extrefsymoff' in symbols: dysymtab[10] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['extrefsymoff']).encode('hex')\n if 'nextrefsyms' in symbols: dysymtab[11] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nextrefsyms']).encode('hex')\n if 'indirectsymoff' in symbols: dysymtab[12] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['indirectsymoff']).encode('hex')\n if 'nindirectsyms' in symbols: dysymtab[13] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nindirectsyms']).encode('hex')\n if 'extreloff' in symbols: dysymtab[14] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['extreloff']).encode('hex')\n if 'nextrel' in symbols: dysymtab[15] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nextrel']).encode('hex')\n if 'nlocrel' in symbols: dysymtab[16] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['nlocrel']).encode('hex')\n if 'locreloff' in symbols: dysymtab[17] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['locreloff']).encode('hex')\n if len(symbols) > 0:\n for i in range(len(dysymtab)):\n if '??' 
in dysymtab[-1]:\n dysymtab.pop(-1)\n else:\n break\n self.__sig.add_named_hex(load_cmd.get_cmd_name() + \"_\" + str(self.__dyld_count), cmd_bytes + cmd_size + ''.join(dysymtab))\n else:\n self.__sig.add_named_hex(load_cmd.get_cmd_name() + \"_\" + str(self.__dyld_count), cmd_bytes + cmd_size)\n self.__dyld_count += 1\n except Exception as e:\n print str(e)", "def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist", "def symbol_set(self):\n return tuple([specie.symbol for specie in self.types_of_specie])", "def get_symbol_table(self):\n return self.symbol_table", "def find_symbols(KB):\n symbols = [] # List of symbols initially empty\n for clause in KB: # Loop through every clause in the KB\n clause = clause.split() \n for term in clause: # Loop through every term in clause, adding symbol to list if not already present\n if term.startswith('-'):\n term = term[1:]\n if term not in symbols: \n symbols.append(term)\n symbols.sort() # Sort symbol list\n return symbols", "def create_symbol_table() -> pynini.SymbolTable:\n table = pynini.SymbolTable()\n for num in range(34, 200): # ascii alphanum + letter range\n table.add_symbol(chr(num), num)\n table.add_symbol(EPS, 0)\n table.add_symbol(WHITE_SPACE, 32)\n return table", "def generate_keywords(self, symbol):\n url = \"https://apiv2.bitcoinaverage.com/symbols/indices/names\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n names = response.json()\n\n for coin_type in names:\n for key, value in names[coin_type].items():\n if symbol.upper() == key or symbol.lower() == value.lower():\n return [key, value] if key != \"BTC\" else [key, value, \"XBT\"]\n except requests.RequestException as e:\n print(e)\n\n return [symbol]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a centered square (or cube) with edge length of d.
def model_square(d, nx, ny, nz=1):
    e = EMData()
    e.set_size(nx, ny, nz)
    e.process_inplace("testimage.squarecube", {"edge_length": d, "fill": 1})
    return e
[ "def square_diamond(sx, sy, size, strong):\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)", "def square(t, length):\n polygon(t, 4, length)", "def make_box_square(box, offset_scale=0.05):\n\n x_min, y_min, x_max, y_max = box[:4]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + 
y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n\n if height >= width:\n half_box = height / 2.\n x_min = center_x - half_box\n x_max = center_x + half_box\n if width > height:\n half_box = width / 2.\n y_min = center_y - half_box\n y_max = center_y + half_box\n\n box_side_lenght = (x_max + x_min) / 2.\n offset = offset_scale * box_side_lenght\n x_min = x_min - offset\n x_max = x_max + offset\n y_min = y_min - offset\n y_max = y_max + offset\n return (int(x_min), int(y_min), int(x_max), int(y_max))", "def plasm_cube(self, size=0.1, color=WHITE):\n return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))", "def sample_spherical(d):\n vector = rnd.normal(loc=0, scale=1, size=d)\n return vector / np.linalg.norm(vector, axis=0)", "def draw_square(start_point, angle, length, color=sd.COLOR_YELLOW):\n draw_equilateral_figure(start_point=start_point, angle=angle, length=length, count_angles=4, color=color)", "def square(center, side_length, *args, **kwargs):\n center = np.asarray(center)\n side_length = float(side_length)\n lower_left = center - 0.5*side_length\n return patch.Rectangle(lower_left, side_length, side_length,\n \t\t\t\t\t *args, **kwargs)", "def draw_square(t, n, sz):\t\n for i in range(n):\n for j in range(4):\n t.fd(sz)\n t.left(90)\n t.right(360/n)", "def construct_symmetric_uniform_mesh(self, h):\n half_size = self.size / 2\n num_rows = num_cols = int(half_size / h) + 2 # Only need to store up to middle\n return SimpleMesh(h, num_rows, num_cols)", "def sqrtCF(d):\n sqrtD = sqrt(d)\n P = 0\n Q = 1\n while True:\n a = int(floor((P + sqrtD) / Q))\n yield a\n P = a * Q - P\n Q = (d - P*P) // Q # It can be shown that Q evenly divides d - P*P", "def HyperCube(dim = 3, base = 2) :\r\n\r\n limits = dim * [base]\r\n\r\n ng = util.enumeration.NTupleGenerator(limits)\r\n N = ng.Num()\r\n vertexList = []\r\n\r\n for i in range(0, N) :\r\n vertexList.append( copy.copy(ng.Current()) )\r\n ng.Next()\r\n\r\n X = networkx.Graph()\r\n\r\n for i in range(0, N) :\r\n for j in range(i+1, N) :\r\n if HammingDistance( vertexList[i], vertexList[j] ) == 1 :\r\n X.add_edge(i,j)\r\n\r\n return X", "def __double_centering(self, squared, numpoints):\n identity_matrix = np.identity(numpoints)\n ones = np.ones(squared.shape)\n j = identity_matrix - (1 / numpoints) * ones\n b = -(1/2) * j @ squared @ j\n return b", "def C(width = 1, size = (10, 20), layer = 0):\n D = Device(name = 'C')\n w = width/2\n s1, s2 = size\n points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2-w),\n (s1, s2-w), (s1, s2+w), (-w, s2+w), (-w, -w)]\n D.add_polygon(points, layer = layer)\n D.add_port(name = 1, midpoint = (s1, s2), width = width, orientation = 0)\n D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)\n return D", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def squaremesh(md,Lx,Ly,nx,ny):\n\n\t#get number of elements and number of nodes\n\tnel=(nx-1)*(ny-1)*2\n\tnods=nx*ny\n\n\t#initialization\n\tindex=np.zeros((nel,3),int)\n\tx=np.zeros((nx*ny))\n\ty=np.zeros((nx*ny))\n\n\t#create coordinates\n\tfor n in xrange(0,nx):\n\t\tfor m in xrange(0,ny):\n\t\t\tx[n*ny+m]=float(n)\n\t\t\ty[n*ny+m]=float(m)\n\n\t#create index\n\tfor n in xrange(0,nx-1):\n\t\tfor m in xrange(0,ny-1):\n\t\t\tA=n*ny+(m+1)\n\t\t\tB=A+1\n\t\t\tC=(n+1)*ny+(m+1)\n\t\t\tD=C+1\n\t\t\tindex[n*(ny-1)*2+2*m,:]=[A,C,B]\n\t\t\tindex[n*(ny-1)*2+2*(m+1)-1,:]=[B,C,D]\n\n\t#Scale x and y\n\tx=x/np.max(x)*Lx\n\ty=y/np.max(y)*Ly\n\n\t#create 
segments\n\tsegments=np.zeros((2*(nx-1)+2*(ny-1),3),int)\n\t#left edge:\n\tsegments[0:ny-1,:]=np.vstack((np.arange(2,ny+1),np.arange(1,ny),(2*np.arange(1,ny)-1))).T\n\t#right edge:\n\tsegments[ny-1:2*(ny-1),:]=np.vstack((np.arange(ny*(nx-1)+1,nx*ny),np.arange(ny*(nx-1)+2,nx*ny+1),2*np.arange((ny-1)*(nx-2)+1,(nx-1)*(ny-1)+1))).T\n\t#front edge:\n\tsegments[2*(ny-1):2*(ny-1)+(nx-1),:]=np.vstack((np.arange(2*ny,ny*nx+1,ny),np.arange(ny,ny*(nx-1)+1,ny),np.arange(2*(ny-1),2*(nx-1)*(ny-1)+1,2*(ny-1)))).T\n\t#back edge\n\tsegments[2*(ny-1)+(nx-1):2*(nx-1)+2*(ny-1),:]=np.vstack((np.arange(1,(nx-2)*ny+2,ny),np.arange(ny+1,ny*(nx-1)+2,ny),np.arange(1,2*(nx-2)*(ny-1)+2,2*(ny-1)))).T\n\n\t#plug coordinates and nodes\n\tmd.mesh=mesh2d()\n\tmd.mesh.x=x\n\tmd.mesh.y=y\n\tmd.mesh.numberofvertices=nods\n\tmd.mesh.vertexonboundary=np.zeros((nods),bool)\n\tmd.mesh.vertexonboundary[segments[:,0:2]-1]=True\n\n\t#plug elements\n\tmd.mesh.elements=index\n\tmd.mesh.segments=segments\n\tmd.mesh.numberofelements=nel\n\n\t#Now, build the connectivity tables for this mesh.\n\tmd.mesh.vertexconnectivity=NodeConnectivity(md.mesh.elements,md.mesh.numberofvertices)[0]\n\tmd.mesh.elementconnectivity=ElementConnectivity(md.mesh.elements,md.mesh.vertexconnectivity)[0]\n\n\treturn md", "def cube(cls, center=[0,0,0], radius=[1,1,1]):\n c = Vector(0, 0, 0)\n r = [1, 1, 1]\n if isinstance(center, list): c = Vector(center)\n if isinstance(radius, list): r = radius\n else: r = [radius, radius, radius]\n\n polygons = list(map(\n lambda v: Polygon( \n list(map(lambda i: \n Vertex(\n Vector(\n c.x + r[0] * (2 * bool(i & 1) - 1),\n c.y + r[1] * (2 * bool(i & 2) - 1),\n c.z + r[2] * (2 * bool(i & 4) - 1)\n ), \n None\n ), v[0]))),\n [\n [[0, 4, 6, 2], [-1, 0, 0]],\n [[1, 3, 7, 5], [+1, 0, 0]],\n [[0, 1, 5, 4], [0, -1, 0]],\n [[2, 6, 7, 3], [0, +1, 0]],\n [[0, 2, 3, 1], [0, 0, -1]],\n [[4, 5, 7, 6], [0, 0, +1]]\n ]))\n return CSG.fromPolygons(polygons)", "def _compute_diamond_matrix(self, d):\n return self.cuspidal_submodule().diamond_bracket_matrix(d).block_sum(self.eisenstein_submodule().diamond_bracket_matrix(d))", "def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):\n return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)", "def generate_square_scene(size):\n scene = [\n (np.array((0.0, 0.0)), np.array((size, 0.0))),\n (np.array((0.0, 0.0)), np.array((0.0, size))),\n (np.array((size, size)), np.array((size, 0.0))),\n (np.array((size, size)), np.array((0.0, size)))]\n\n return scene" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a Spider filename string and insert parameters.
def parse_spider_fname(mystr, *fieldvals):
    # helper functions and classes
    def rm_stack_char(mystr):
        "Helper function to remove a stack character if it exists"
        stackloc = mystr.find("@")
        if stackloc != -1:
            # there's an '@' somewhere
            if len(mystr) - 1 == stackloc:
                # It's at the end of the string
                return mystr[:-1]
            else:
                # '@' not at the end, so it's an error
                raise ValueError, "Invalid format: misplaced '@'."
        else:
            # no '@' at all
            return mystr

    class Fieldloc:
        "Helper class to store description of a field"
        def __init__(self, begin, end):
            self.begin = begin
            self.end = end
        def count(self):
            "Size of the field (including braces)"
            return self.end - self.begin + 1

    def find_fields(mystr):
        "Helper function to identify and validate fields in a string"
        fields = []
        loc = 0
        while True:
            begin = mystr.find('{', loc)
            if begin == -1: break
            end = mystr.find('}', begin)
            field = Fieldloc(begin, end)
            # check validity
            asterisks = mystr[begin+1:end]
            if asterisks.strip("*") != "":
                raise ValueError, "Malformed {*...*} field: %s" % \
                    mystr[begin:end+1]
            fields.append(Fieldloc(begin, end))
            loc = end
        return fields

    # remove leading whitespace
    mystr.strip()
    # remove stack character (if it exists)
    mystr = rm_stack_char(mystr)
    # locate fields to replace
    fields = find_fields(mystr)
    if len(fields) != len(fieldvals):
        # wrong number of fields?
        raise ValueError, "Number of field values provided differs from" \
            "the number of {*...*} fields."
    newstrfrags = []
    loc = 0
    for i, field in enumerate(fields):
        # text before the field
        newstrfrags.append(mystr[loc:field.begin])
        # replace the field with the field value
        fieldsize = field.count() - 2
        fielddesc = "%0" + str(fieldsize) + "d"
        newstrfrags.append(fielddesc % fieldvals[i])
        loc = field.end + 1
    newstrfrags.append(mystr[loc:])
    return "".join(newstrfrags)
[ "def parse_params(self, fn=None):\n return parse_filename(str(self) if fn is None else fn, conventions=self.conventions, postprocessor=self.postprocessor)", "def _define_params_from_string(self, params_string):\n params_list = params_string.split()\n self.token_name = params_list[0]\n self.df = params_list[1]\n self.tf = params_list[2]\n self.doc_dict = self._create_doc_dict_from_string(params_string[3:])", "def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry", "def parseFilePath(self, filepath):\n\n li = filepath.split(\"/\") \n last = li[-1].split(\"_\")\n\n self.subjectName = li[-2]\n self.experimenterName = li[-3]\n self.experimentDate = last[-1]\n self.paradigm = last[-2]\n self.subjectName = last[-3]", "def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)", "def _parse_filename(filename):\n base = os.path.basename(os.path.splitext(filename)[0])\n x = base.split('-')\n if len(x) == 2:\n return x[0], None, int(x[1])\n elif len(x) == 3:\n return x[0], x[1].lower(), int(x[2])", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def ParseFileName(self, fn, fnParser):\n self.filename = fn\n attrs = fnParser.Parse(fn)\n [setattr(self, k, _TryNumeric(v)) for k, v in attrs]", "def parse_glider_filename(filename):\n head, tail = os.path.split(filename)\n\n matches = re.search(r\"([\\w\\d\\-]+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)\\.(\\w+)$\", tail)\n\n if matches is not None:\n return {\n 'path': head,\n 'glider': matches.group(1),\n 'year': int(matches.group(2)),\n 'day': int(matches.group(3)),\n 'mission': int(matches.group(4)),\n 'segment': int(matches.group(5)),\n 'type': matches.group(6)\n }\n else:\n raise ValueError(\n \"Filename ({}) not in usual glider format: \"\n \"<glider name>-<year>-<julian day>-\"\n \"<mission>-<segment>.<extenstion>\".format(filename)\n )", "def prepare_from_file(*args, **kwargs):", "def parse_filename(filename): # , time_fmt=TIME_INFILE_FMT):\n # Split the name up into its \"blocks\"\n parts = filename.split(\"_\")\n hive_str, rpi_str = parts[1:3]\n day_str = parts[3]\n method = parts[5]\n\n # Parse Hive and RPi number\n hive = int(hive_str[-1])\n rpi = int(rpi_str[-1])\n method = method.strip(\".csv\")\n\n # # Parse timestring into a datetime object\n # dt_naive = datetime.strptime(t_str, time_fmt)\n # dt_utc = pytz.utc.localize(dt_naive)\n\n return hive, rpi, method, day_str", "def parse(self, infile):\r\n raise NotImplementedError()", "def init_from_string(self, fs_in, param_string):\n if '(' in param_string:\n name_params_re = re.compile(r'(\\w*)\\((.*)\\)$')\n pieces = name_params_re.match(param_string)\n name = pieces.group(1)\n params = pieces.group(2)\n param_list = params.split(';')\n param_dict = {}\n for param in 
param_list:\n if '=' not in param:\n raise ValueError('preprocess param %s missing a value.' % param)\n k, v = param.split('=', 1)\n if v.isdigit():\n v = int(v)\n else:\n try:\n v = float(v)\n except ValueError:\n pass\n param_dict[k] = v\n self._name = name\n self.init_highpass(param_dict['highpass_cutoff'],\n param_dict['highpass_order'])\n self.init_channel_numbers(param_dict['channel_numbers'])\n else:\n self.__init__(self, fs_in, param_string)", "def _parse_file_path(self, input_path):\n pass", "def _parse_params_file(self, filename: str) -> None:\n content = open(filename, 'r').read()\n for data in yaml.load_all(content):\n self.params = {**self.params, **data}\n print(f'parameters: {self.params}')", "def __init__(self, filename=None, label=None, tokens=None):\n if label: # specify from label/tokens, for testing.\n self.label = label\n self.tokens = tokens\n self.postID = -1\n self.likes = -1\n else: # specify from file.\n self.filename = filename\n parsedNames = filename.split(\"#\")\n if 'pop' in parsedNames[0]:\n self.label = 'pop'\n else:\n self.label = 'sod'\n self.postID = parsedNames[1]\n self.likes = parsedNames[2]\n self.tokenize()", "def parse(self,fp):\n\t\t\n\t\t# create the plex scanner for fp\n\t\t# call parsing logic\n\t\tself.create_scanner(fp)\n\t\tself.stmt_list()", "def parsing_input_parameter_file():\r\n\r\n input_file = open('VCF_Extractor_Parameter_File.txt', 'r')\r\n\r\n print (\"Parsed Lines from User\")\r\n \r\n for line in input_file:\r\n if line.startswith(\"--\"):\r\n line=line.rstrip('\\n')\r\n # just printing user inpt for validation\r\n print (line)\r\n parsed_parameters=line.split(\"--\")\r\n\r\n for x in range(1, len(parsed_parameters)):\r\n inputs = parsed_parameters[x].split(\" \")\r\n \r\n if inputs[0] == \"VCF_Input_File\":\r\n vcf_input_file = inputs[1] \r\n\r\n elif inputs[0] == \"Samples_to_Extract_File\":\r\n samples_to_extract_file = inputs[1]\r\n\r\n elif inputs[0] == \"VCF_Output_File\":\r\n vcf_output_file = inputs[1]\r\n \r\n\r\n # Skip anything else\r\n else:\r\n pass\r\n\r\n input_file.close()\r\n\r\n #Printing \r\n print (\"Name of VCF input File is: \", vcf_input_file)\r\n print (\"Name of Samples to Extract File: \", samples_to_extract_file)\r\n print (\"Name of VCF Output File is: \", vcf_output_file)\r\n \r\n return{'vcf_input_file':vcf_input_file, 'samples_to_extract_file':samples_to_extract_file,\r\n 'vcf_output_file':vcf_output_file}", "def parse_filenames(filenames):\n \n for fn in filenames:\n dirname, basename = path.split(fn)\n subject_visit = basename[:7]\n visit = basename[5:7]\n yield dirname, basename, subject_visit, visit" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the data in slice iz, row ix of an image to standard out.
def print_row(input, ix=0, iz=0):
    image = get_image(input)
    nx = image.get_xsize()
    ny = image.get_ysize()
    nz = image.get_zsize()
    print "(z = %d slice, x = %d row)" % (iz, ix)
    line = []
    for iy in xrange(ny):
        line.append("%12.5g " % (image.get_value_at(ix, iy, iz)))
        if ((iy + 1) % 5 == 0): line.append("\n ")
    line.append("\n")
    print "".join(line)
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def test_display_image(data_dir):\n epi_img = nib.load(data_dir)\n epi_img_data = epi_img.get_data()\n slice_1 = epi_img_data[73:106, 93:126, 95]\n slice_2 = epi_img_data[:, :, 95]\n preprocess.show_slices([slice_1, slice_2])", "def display_img_info(img) :\n print (img.shape, img.ndim, img.dtype.name, type(img))", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n lm_i = self.gtdb['lm'][i, :]\n\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))\n print 'The landmarks points are: {}'.format(lm_i)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n 
dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printImage(imageObject):\n # TODO\n pass", "def dump(self):\r\n for yIdx in xrange(self.height):\r\n row = \"\"\r\n for xIdx in xrange(self.width):\r\n row += \"%4s \" % self.grid[xIdx][yIdx]\r\n print row", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_xy(xy_data):\n for i in range(0, len(xy_data)):\n print(\"%g\\t%g\" % (xy_data[i][0], xy_data[i][1]))", "def 
prettyPrint3D(M):\n m, n_C, n_H, n_W = M.shape\n\n for i in range(m):\n \n for c in range(n_C):\n print('Image {}, channel {}'.format(i + 1, c + 1), end='\\n\\n') \n\n for h in range(n_H):\n print(\"/\", end=\"\")\n\n for j in range(n_W):\n\n print(M[i, c, h, j], end = \",\")\n\n print(\"/\", end='\\n\\n')\n \n print('-------------------', end='\\n\\n')", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the data in slice iz, column iy of an image to standard out.
def print_col(input, iy=0, iz=0):
    image = get_image(input)
    nx = image.get_xsize()
    ny = image.get_ysize()
    nz = image.get_zsize()
    print "(z = %d slice, y = %d col)" % (iz, iy)
    line = []
    for ix in xrange(nx):
        line.append("%12.5g " % (image.get_value_at(ix, iy, iz)))
        if ((ix + 1) % 5 == 0): line.append("\n ")
    line.append("\n")
    print "".join(line)
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def test_display_image(data_dir):\n epi_img = nib.load(data_dir)\n epi_img_data = epi_img.get_data()\n slice_1 = epi_img_data[73:106, 93:126, 95]\n slice_2 = epi_img_data[:, :, 95]\n preprocess.show_slices([slice_1, slice_2])", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def display_img_info(img) :\n print (img.shape, img.ndim, img.dtype.name, type(img))", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n lm_i = self.gtdb['lm'][i, :]\n\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))\n print 'The landmarks points are: {}'.format(lm_i)", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def dump(self):\r\n for yIdx in xrange(self.height):\r\n row = \"\"\r\n for xIdx in xrange(self.width):\r\n row += \"%4s \" % self.grid[xIdx][yIdx]\r\n print row", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n 
plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_sitk_info(itk_image):\n print(\"[INFO]: Shape - {itk_image.GetSize()}\")\n print(\"[INFO]: Spacing - {itk_image.GetSpacing()}\")\n print(\"[INFO]: Origin - {itk_image.GetOrigin()}\")\n print(\"[INFO]: Direction - {itk_image.GetDirection()}\\n\")", "def prettyPrint3D(M):\n m, n_C, n_H, n_W = M.shape\n\n for i in range(m):\n \n for c in range(n_C):\n print('Image {}, channel {}'.format(i + 1, c + 1), end='\\n\\n') \n\n for h in range(n_H):\n print(\"/\", end=\"\")\n\n for j in range(n_W):\n\n print(M[i, c, h, j], end = \",\")\n\n print(\"/\", end='\\n\\n')\n \n print('-------------------', end='\\n\\n')", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def print_xy(xy_data):\n for i in range(0, len(xy_data)):\n print(\"%g\\t%g\" % (xy_data[i][0], xy_data[i][1]))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def print(self):\n\n predator_0 = self.add_vectors(self._state[0], self._offset)\n predator_0 = predator_0[0] * self.nrow + predator_0[1]\n predator_1 = self.add_vectors(self._state[1], self._offset)\n predator_1 = predator_1[0] * self.nrow + predator_1[1]\n prey = (self._offset[0] * self.nrow + self._offset[1])\n\n cell = 0\n line = \" \" + \"\".join([\" {}\".format(i) for i in range(self.ncol)])\n print(line)\n for i in range(self.nrow):\n line = f\"{i} |\"\n for j in range(self.ncol):\n if cell == prey:\n line += \"X|\"\n elif cell == predator_0:\n line += \"0|\"\n elif cell == predator_1:\n line += \"1|\"\n else:\n line += \" |\"\n cell += 1\n print(line)\n print()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the data in slice iz of an image to standard out.
def print_slice(input, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	for iy in xrange(ny):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n ")
				line.append(" ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def test_display_image(data_dir):\n epi_img = nib.load(data_dir)\n epi_img_data = epi_img.get_data()\n slice_1 = epi_img_data[73:106, 93:126, 95]\n slice_2 = epi_img_data[:, :, 95]\n preprocess.show_slices([slice_1, slice_2])", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def display_img_info(img) :\n print (img.shape, img.ndim, img.dtype.name, type(img))", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def printImage(imageObject):\n # TODO\n pass", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n lm_i = self.gtdb['lm'][i, :]\n\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))\n print 'The landmarks points are: {}'.format(lm_i)", "def ndarray_print(nd):\n try:\n x = 
nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_sitk_info(itk_image):\n print(\"[INFO]: Shape - {itk_image.GetSize()}\")\n print(\"[INFO]: Spacing - {itk_image.GetSpacing()}\")\n print(\"[INFO]: Origin - {itk_image.GetOrigin()}\")\n print(\"[INFO]: Direction - {itk_image.GetDirection()}\\n\")", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def printImageFile(ifobj):\r\n print(f\"fileName:'{ifobj.fileName}' fileType:'{ifobj.fileType}' fileSize:'{ifobj.fileSize}'\"\r\n f\" fileColorMode:'{ifobj.fileColorMode}' fileLOC:'{ifobj.fileLOC}'\")", "def printSlicerVars(self):\r\n print(\"in_dir:\\t{}\".format((self.curr_dir_lbl.get())))\r\n print(\"out_dir\\t{}\".format((self.out_dir_lbl.get())))\r\n print(\"img_ext\\t{}\".format((self.img_ext)))\r\n print(\"mode\\t{}\".format((self.mode.get())))\r\n print(\"reverse\\t{}\".format((self.reverse.get())))\r\n print(\"curve_depth\\t{}\".format((self.curve_depth)))\r\n print(\"num_slices\\t{}\".format((self.num_slices_entry.get())))\r\n print(\"num_imgs\\t{}\".format((self.num_imgs)))", "def dump(arr, filename):\n permutation = tuple(range(arr.ndim)[::-1])\n if np.all(np.array(arr.shape) < 65536 // 2): # OK to go with Nifti1\n i = nib.Nifti1Image(arr.transpose(permutation), np.eye(4))\n else: # only with Nifti2\n i = nib.Nifti2Image(arr.transpose(permutation), np.eye(4))\n nib.save(i, filename)", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the data in slice iz of an image to standard out in a format that agrees with v2
def print_image_slice(input, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	for iy in xrange(ny-1,-1,-1):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n ")
				line.append(" ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
[ "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def test_display_image(data_dir):\n epi_img = nib.load(data_dir)\n epi_img_data = epi_img.get_data()\n slice_1 = epi_img_data[73:106, 93:126, 95]\n slice_2 = epi_img_data[:, :, 95]\n preprocess.show_slices([slice_1, slice_2])", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def display_img_info(img) :\n print (img.shape, img.ndim, img.dtype.name, type(img))", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def 
print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def prettyPrint3D(M):\n m, n_C, n_H, n_W = M.shape\n\n for i in range(m):\n \n for c in range(n_C):\n print('Image {}, channel {}'.format(i + 1, c + 1), end='\\n\\n') \n\n for h in range(n_H):\n print(\"/\", end=\"\")\n\n for j in range(n_W):\n\n print(M[i, c, h, j], end = \",\")\n\n print(\"/\", end='\\n\\n')\n \n print('-------------------', end='\\n\\n')", "def _write_image(self):\n return \" \".join(str(i) for i in self.image)", "def printImage(imageObject):\n # TODO\n pass", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def get_faces_data_slice_str(self, i):\n\n i_list = ['{0:.18e}'.format(face.Data[i]) for face in self.Faces]\n i_str = ' '.join(i_list)\n\n return i_str", "def print_sitk_info(itk_image):\n print(\"[INFO]: Shape - {itk_image.GetSize()}\")\n print(\"[INFO]: Spacing - {itk_image.GetSpacing()}\")\n print(\"[INFO]: Origin - {itk_image.GetOrigin()}\")\n print(\"[INFO]: Direction - {itk_image.GetDirection()}\\n\")", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n lm_i = self.gtdb['lm'][i, :]\n\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))\n print 'The landmarks points are: {}'.format(lm_i)", "def DisplayArray(a, fmt='jpeg', rng=[0,1]):\n a = (a - rng[0])/float(rng[1] - rng[0])*255\n a = np.uint8(np.clip(a, 0, 255))\n with open(\"lake_py_{0}.jpg\".format(hvd.rank()), \"w\") as f: \n PIL.Image.fromarray(a).save(f, \"jpeg\")", "def printImageFile(ifobj):\r\n print(f\"fileName:'{ifobj.fileName}' fileType:'{ifobj.fileType}' fileSize:'{ifobj.fileSize}'\"\r\n f\" fileColorMode:'{ifobj.fileColorMode}' fileLOC:'{ifobj.fileLOC}'\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read data from text file; if ncol = -1, read all columns; if ncol >= 0, just read the (ncol+1)th column.
def read_text_file(file_name, ncol = 0):
	from string import split
	inf = file(file_name, "r")
	line = inf.readline()
	data = []
	while len(line) > 0:
		if ncol == -1:
			vdata = split(line)
			if data == []:
				for i in xrange(len(vdata)):
					data.append([float(vdata[i])])
			else:
				for i in xrange(len(vdata)):
					data[i].append(float(vdata[i]))
		else:
			vdata = float(split(line)[ncol])
			data.append(vdata)
		line = inf.readline()
	return data
[ "def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n # divide each element of the line into words\n words = line.split()\n if words: # if there is a line in fact\n if words[0][0]!='#': #do something only if not comment \n data.append(float(words[nCol])) \n # \n return data", "def _read_n_columns(self, n: int) -> List[List[str]]:\n n_columns = [[] for _ in range(n)]\n for line in self.text_stream.content:\n if not line.split(): # If line is ''\n continue\n else:\n sp = line.split()\n try:\n for i, column in enumerate(n_columns):\n column.append(sp[i])\n except IndexError:\n print('You want more columns than you have! Check your file at line {0}!'.format(line))\n return n_columns", "def readLines(filename, col=None):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n lines = [ s.rstrip(\"\\n\\r\") for s in lines ]\n if col == None:\n return lines\n else:\n return [ s.split(\"\\t\")[col] for s in lines ]", "def read_column(file_name, column_number):\n flist = []\n empty_lines = 0\n fread = open(file_name,'r')\n for line in fread:\n chompedLine = line.rstrip()\n if not chompedLine:\n empty_lines += 1\n continue\n flist.append(float(chompedLine.split()[column_number-1]))\n\n return flist", "def read_dataset(ith_col=0):\r\n import numpy as np\r\n\r\n with open('dataset\\synthetic.data\\synthetic') as f:\r\n lines = f.readlines()\r\n\r\n col = []\r\n\r\n # read specified column\r\n for i in range(0, len(lines)):\r\n\r\n # clean string\r\n str = lines[i]\r\n\r\n # break a line of numbers by whitespace\r\n different_cols_str = str.split()\r\n\r\n # convert string to number of specified column\r\n col.append(float(different_cols_str[ith_col]))\r\n\r\n col = np.array(col, dtype=np.float64)\r\n col = np.reshape(col, (col.shape[0], 1))\r\n\r\n return col", "def load_two_column_data(file, rows_to_skip=0):\n\n data = loadtxt(file, skiprows = rows_to_skip)\n\n\n\n data_q = data[:,0]\n data_i = data[:,1]\n return ExpSasData(data_q, data_i)", "def read_data_6_columns(filename=\"ripple_082-085.dat\", skip=1):\n fileobj = open(filename, 'r')\n # ignore the first skip lines\n for i in range(skip):\n fileobj.readline()\n h = []; k = []; qr =[]; qz =[]; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n hval, kval, rval, zval, qval, Fval = line.split()\n h.append(int(hval)) \n k.append(int(kval))\n qr.append(float(rval))\n qz.append(float(zval))\n q.append(float(qval))\n F.append(float(Fval)) \n return h, k, qr, qz, q, F", "def read_file_lines(filename, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [re.split(separator, l.strip()) for l in lines]]\n return np.transpose(res) if column_major else res", "def readTextFile(\n file,\n noDescriptionLines,\n noRows,\n noColumns,\n dataType,\n delimiter=\" \",\n VERBOSE=True,\n):\n if not os.path.isfile(file):\n throwError(\n \"Could not find the file '%s' for reading (in function 'readTextFile').\"\n % file\n )\n if VERBOSE:\n print(\"Reading the data from the ASCII file 
'%s' ... \" % file, end=\" \")\n f = open(file, \"r\")\n description = \"\"\n for i in range(noDescriptionLines):\n description += f.readline()\n dataSize = noRows * noColumns\n data = np.fromfile(f, dataType, dataSize, delimiter)\n data.shape = (noRows, noColumns)\n if VERBOSE:\n print(\"Done\")\n\n return [description, data]", "def read_one_column():\r\n while(1):\r\n print(\" \")\r\n print(\"Enter the input filename: \")\r\n input_file_path = stdin.readline()\r\n file_path = input_file_path.rstrip('\\n')\r\n#\r\n if not os.path.exists(file_path):\r\n print(\"This file doesn't exist\")\r\n #\r\n if os.path.exists(file_path):\r\n print(\"This file exists\")\r\n print(\" \")\r\n infile = open(file_path, \"rb\")\r\n lines = infile.readlines()\r\n infile.close()\r\n\r\n b = []\r\n num = 0\r\n for line in lines:\r\n #\r\n if sys.version_info[0] == 3:\r\n line = line.decode(encoding='UTF-8')\r\n\r\n if re.search(r\"(\\d+)\", line): # matches a digit\r\n iflag = 0\r\n else:\r\n iflag = 1 # did not find digit\r\n#\r\n if re.search(r\"#\", line):\r\n iflag = 1\r\n#\r\n if iflag == 0:\r\n line = line.lower()\r\n if re.search(r\"([a-d])([f-z])\", line): # ignore header lines\r\n iflag = 1\r\n else:\r\n line = line.replace(\",\", \" \")\r\n b.append(float(line))\r\n num = num+1\r\n break\r\n\r\n b = np.array(b)\r\n\r\n print(\"\\n samples = %d \" % num)\r\n return b, num", "def read_two_columns():\r\n while(1):\r\n print(\" \")\r\n print(\"Enter the input filename: \")\r\n input_file_path = stdin.readline()\r\n file_path = input_file_path.rstrip('\\n')\r\n#\r\n if not os.path.exists(file_path):\r\n print(\"This file doesn't exist\")\r\n #\r\n if os.path.exists(file_path):\r\n print(\"This file exists\")\r\n print(\" \")\r\n infile = open(file_path, \"rb\")\r\n lines = infile.readlines()\r\n infile.close()\r\n\r\n a = []\r\n b = []\r\n num = 0\r\n for line in lines:\r\n #\r\n if sys.version_info[0] == 3:\r\n line = line.decode(encoding='UTF-8')\r\n\r\n if re.search(r\"(\\d+)\", line): # matches a digit\r\n iflag = 0\r\n else:\r\n iflag = 1 # did not find digit\r\n#\r\n if re.search(r\"#\", line):\r\n iflag = 1\r\n#\r\n if iflag == 0:\r\n line = line.lower()\r\n if re.search(r\"([a-d])([f-z])\", line): # ignore header lines\r\n iflag = 1\r\n else:\r\n line = line.replace(\",\", \" \")\r\n col1, col2 = line.split()\r\n a.append(float(col1))\r\n b.append(float(col2))\r\n num = num+1\r\n break\r\n\r\n a = np.array(a)\r\n b = np.array(b)\r\n\r\n print(\"\\n samples = %d \" % num)\r\n return a, b, num", "def read_three_columns():\r\n while(1):\r\n print(\" \")\r\n print(\"Enter the input filename: \")\r\n input_file_path = stdin.readline()\r\n file_path = input_file_path.rstrip('\\n')\r\n#\r\n if not os.path.exists(file_path):\r\n print(\"This file doesn't exist\")\r\n #\r\n if os.path.exists(file_path):\r\n print(\"This file exists\")\r\n print(\" \")\r\n infile = open(file_path, \"rb\")\r\n lines = infile.readlines()\r\n infile.close()\r\n\r\n a = []\r\n b = []\r\n c = []\r\n num = 0\r\n for line in lines:\r\n #\r\n if sys.version_info[0] == 3:\r\n line = line.decode(encoding='UTF-8')\r\n\r\n if re.search(r\"(\\d+)\", line): # matches a digit\r\n iflag = 0\r\n else:\r\n iflag = 1 # did not find digit\r\n#\r\n if re.search(r\"#\", line):\r\n iflag = 1\r\n#\r\n if iflag == 0:\r\n line = line.lower()\r\n if re.search(r\"([a-d])([f-z])\", line): # ignore header lines\r\n iflag = 1\r\n else:\r\n line = line.replace(\",\", \" \")\r\n col1, col2, col3 = line.split()\r\n a.append(float(col1))\r\n 
b.append(float(col2))\r\n c.append(float(col3))\r\n num = num+1\r\n break\r\n\r\n a = np.array(a)\r\n b = np.array(b)\r\n c = np.array(c)\r\n\r\n print(\"\\n samples = %d \" % num)\r\n return a, b, c, num", "def read_slurm_file(filename, cols, skip=54, stop=-34, column_major=True):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:stop]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [l.split() for l in lines]]\n return np.transpose(res) if column_major else res", "def get_data(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n data=get_str(file,cols,nrows)\n if nvar==1: return array(list(map(float,data)))\n else:\n data=list(data)\n for j in range(nvar): data[j]=array(list(map(float,data[j])))\n return tuple(data)", "def read_cols(file):\n df = pd.read_csv(file, sep='\\t', nrows=1, #skiprows=6,\n encoding='utf-8')\n return list(df.columns)", "def read_data_4_columns(filename=\"ripple_082-085.dat\"):\n # Process comment and header lines\n fileobj = open(filename, 'r')\n while True:\n s = fileobj.readline()\n if s.startswith('#'):\n print(s)\n continue\n elif s.startswith('h'):\n break\n else:\n print(\"Any comments (including an empty line) should start with #.\")\n print(\"Please fix your input file.\")\n sys.exit(1)\n \n # Go through data points \n h = []; k = []; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n # This ignores an empty line\n line = line.rstrip()\n if not line: \n continue\n hval, kval, qval, Fval = line.split()\n h.append(int(hval))\n k.append(int(kval)) \n q.append(float(qval))\n F.append(float(Fval))\n return h, k, q, F", "def read_table( filename, ncols, ignore='#' ):\n\n\t# Initialize result dictionary\n\tresult = {}\n\tfor i in range(ncols):\n\t\tresult[i] = []\n\t\n\ttry : f = open( filename, 'r' )\n\texcept:\n\t\tprint 'ERROR: file not found'\n\t\treturn\n\t\n\tend_of_file = False\n\twhile not end_of_file:\n\t\ttry:\n\t\t\ttemp = f.readline()\n\t\t\tif temp[0] == ignore : pass # Ignore the ignore character\n\t\t\telse:\n\t\t\t\ttemp = temp.split()\n\t\t\t\tfor i in range(ncols) : result[i].append( np.float(temp[i]) )\n\t\texcept:\n\t\t\tend_of_file = True\n\t\n\tf.close()\n\treturn result", "def read_data(filename,skip_lines=2):\n\n\twith open(filename) as f:\n\t\tlines = [line.strip('\\n') for line in f.readlines()]\n\t\n\t# Getting shape of the data from the (skip_lines -1)th line \n\n\tshape=tuple(map(int,lines[skip_lines-1].split()))\n\t\n\t# Converting values read to integer\n\tvalues = np.array(map(int,lines[skip_lines:]))\n\tshape = (shape[1],shape[0])\n\n\tif shape[0]!=1:\n\t\treturn np.reshape(values,shape).T\n\telse:\n\t\treturn values.reshape(-1,1)", "def read_data(f):\n\tdata = \"\"\n\tfor i in range(DATA_NLINES):\n\t\tdata += f.readline()\n\n\tif len(data.split(\"\\n\")) < DATA_NLINES:\n\t\treturn None\n\treturn parse_data(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
linearly interpolate a 1D power spectrum to required length with required Pixel size
input_object - a 1D list with a 1D curve to be interpolated
length_current - half size of the image size (in case of power spectrum, it can be different from the length of the input_object)
length_interpolated - length of the interpolated 1D curve
Pixel_size_current - pixel size of the input 1D list
Pixel_size_interpolated - pixel size of the target 1D list
One can either input the two lengths or two respective pixel sizes
def reshape_1d(input_object, length_current=0, length_interpolated=0, Pixel_size_current = 0., Pixel_size_interpolated = 0.):
	interpolated = []
	if length_current == 0: length_current = len(input_object)
	lt = len(input_object) - 2
	if length_interpolated == 0:
		if( Pixel_size_interpolated != Pixel_size_current):
			length_interpolated = int(length_current*Pixel_size_current/Pixel_size_interpolated + 0.5)
		else:
			ERROR("Incorrect input parameters","reshape_1d",1)
			return []
	if Pixel_size_current == 0.: Pixel_size_current = 1.
	Pixel_size_interpolated = Pixel_size_current*float(length_current)/float(length_interpolated)
	qt = Pixel_size_interpolated/Pixel_size_current
	for i in xrange(length_interpolated):
		xi = float(i)*qt
		ix = min(int(xi), lt)
		df = xi - ix
		xval = (1.0-df)*input_object[ix] + df*input_object[ix+1]
		interpolated.append(xval)
	return interpolated
[ "def interpolation(self):\n def find_min_delta_omega(oemga:ndarray):\n \"\"\"计算频率间隔的最小值\n Parameters\n ----------\n oemga : ndarray\n 频率数组\n\n Returns\n -------\n float \n min_delta_omega\n \"\"\"\n\n min_delta_omega = abs(omega[0] - omega[1])\n for index in range(0, len(omega) - 1):\n delta_omega = abs(omega[index] - omega[index+1])\n if delta_omega < min_delta_omega:\n min_delta_omega = delta_omega\n\n return min_delta_omega\n\n def generate_equal_space_array(start:float, step:float, N: int) :\n \"\"\"产生一个长度为N的数组\n\n Parameters\n ----------\n start : float\n 数组起始值\n step : float\n 数组间距\n N : int\n 数组长度\n \"\"\"\n return np.arange(0, N, 1) * step\n\n flatten = lambda array: array.flatten() # 将数组变成一维\n\n omega, power = map(flatten, np.hsplit(self.spectrum, 2))\n interp_fun = interp1d(omega, power, kind='linear', bounds_error=False, fill_value=(0, 0))\n\n min_delta_omega = find_min_delta_omega(omega)\n\n N = int(self.omega_max / min_delta_omega)\n\n omega_new = generate_equal_space_array(0, min_delta_omega, N + 1)\n\n power_new = interp_fun(omega_new)\n\n self.interpolated_spectrum = np.column_stack([omega_new, power_new])\n\n # plt.plot(omega, power, 'r', omega_new, power_new, 'b+')\n\n # plt.show() ", "def _LinearWave2(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height,offsetQ=0,arraylenQ=5002):\n itt=0\n h1=Edge1Height-offsetQ\n h2=Edge2Height-offsetQ\n p1=int(Edge1PixNo)-1\n p2=int(Edge2PixNo)-1\n NewList=[]\n #\n while itt<arraylenQ:\n if itt<p1:\n NewList.append(offsetQ-offsetQ)\n elif p1<=itt<=p2:\n nextval=h2+((itt-p2)*(h2-h1)/float(p2-p1))#h1*((h2/float(h1))**((itt-p1)/float(p2-p1)))\n NewList.append(nextval)\n else:# itt>p2:\n NewList.append(offsetQ-offsetQ)\n itt+=1\n return np.array(NewList)+offsetQ", "def lin_interp(lengths,Hv,Lv):\n \n ar_long = np.array(lengths)\n dh_dl = (Lv-Hv)/lengths.sum()\n H_riv = np.zeros(ar_long.shape[0],dtype=np.float) \n for idx in range(ar_long.shape[0]):\n if idx == 0:\n len_cum = 0.5 * ar_long[0]\n else:\n len_cum += 0.5 * (ar_long[idx-1]+ar_long[idx])\n H_riv[idx] = Hv + len_cum * dh_dl\n return H_riv", "def interpolate(x_1, x_2, x_3, newlength):\n line_segment = x_3 - x_2\n scale_value = newlength / line_segment\n return (x_1 - x_2) * scale_value", "def interpolate(signal, new_length):\n assert len(signal) > 1 and len(signal[0]) > 1\n current_length = len(signal)\n signal = np.array(signal).T\n new_signal = []\n x_array = get_x_array(current_length, new_length)\n\n for l in range(len(signal)):\n fp = signal[l]\n xp = list(range(current_length))\n new_f = np.interp(x_array, xp, fp)\n new_signal.append(new_f)\n\n signal = np.array(new_signal).T\n return signal", "def interpolate(final_length, current_length, current_data):\n if final_length <= current_length:\n # TODO gestire questo caso\n return current_data\n\n final_data = np.zeros(final_length, dtype=None)\n up_factor = final_length // current_length\n for i in range(current_length - 1):\n increment = (current_data[i + 1] - current_data[i]) / up_factor\n for j in range(up_factor - 1):\n final_data[((i - 1) * up_factor) + j] = current_data[i] + j * increment\n return final_data", "def _interp_spect(orig_data, orig_wavl, new_wavl,\n interp='linear'):\n # Input checking\n orig_data = np.asarray(orig_data, dtype=orig_data.dtype)\n orig_wavl = np.asarray(orig_wavl, dtype=orig_wavl.dtype)\n new_wavl = np.asarray(new_wavl, dtype=new_wavl.dtype)\n\n if orig_data.shape != orig_wavl.shape:\n raise ValueError('_interp_spect received data and wavelength '\n 'arrays of different shapes')\n\n interp_func = 
interpolate.interp1d(\n orig_wavl,\n orig_data,\n kind=interp,\n fill_value=np.nan,\n bounds_error=False,\n )\n regrid_data = interp_func(new_wavl)\n # regrid_data = np.interp(new_wavl, orig_wavl, orig_data, )\n return regrid_data", "def _linear_interpolation(\n prevFrame : \"np.ndarray\",\n cFrame : \"np.ndarray\",\n fID : \"int\",\n smoothingFrames : \"int\"\n ) -> \"np.ndarray\":\n prevWeight = 1-((fID+1)/smoothingFrames)\n finalWeight = (fID+1)/smoothingFrames\n transitionFrame = prevWeight * prevFrame + finalWeight*cFrame\n return transitionFrame.astype(np.uint8)", "def linear_interpolator(partition_threshold, spanning_link):\r\n #print(\"Entering linear_interpolator()\")\r\n global weight_range\r\n for link_row in range(len(links)):\r\n if links[link_row, li_idx] == spanning_link:\r\n total_length = float(links[link_row, lr_Length])\r\n start = weight_range[link_row, 0]\r\n weight_ratio = (partition_threshold - start)/weighting_function(total_length)\r\n length_from_start = weight_ratio*total_length\r\n print(\"The length from upstream node is: \" + str(length_from_start))\r\n return length_from_start", "def _ExponentialWave2(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height,offsetQ=0,arraylenQ=5002):\n itt=0\n h1=Edge1Height-offsetQ\n h2=Edge2Height-offsetQ\n p1=int(Edge1PixNo)-1\n p2=int(Edge2PixNo)-1\n NewList=[]\n #\n while itt<arraylenQ:\n if itt<Edge1PixNo:\n NewList.append(offsetQ-offsetQ)\n elif p1<=itt<=p2:\n nextval=h1*((h2/float(h1))**((itt-p1)/float(p2-p1)))\n NewList.append(nextval)\n else:# itt>p2:\n NewList.append(offsetQ-offsetQ)\n itt+=1\n return np.array(NewList)+offsetQ", "def test_linear_interpolation(self):\n i = 10\n sample = self.ds.X[i, :]\n\n ref_img = None # Use default reference image\n m = 50 # Number of steps\n\n ret = self.explainer.linearly_interpolate(sample, ref_img, m)\n\n self.assertEqual(m, len(ret))\n self.assertIsInstance(ret[10], CArray)\n self.assertEqual(ret[10].shape, sample.shape)", "def refine_time_steps(path_steps, light_curve_in):\n light_curve = light_curve_in\n light_curve = np.log10(np.abs(light_curve) + 1) # the 1 term is to prevent log(0) errors\n n_steps = path_steps.shape[0]\n # find all prominent peaks\n peaks, properties = find_peaks(light_curve, prominence=0.1, width=0)\n # we spread the same number of steps as the original light curve, only between the detected peaks.\n n_peaks = len(peaks) # the number of peaks\n print('Found '+str(n_peaks)+' peaks')\n print(peaks)\n print(properties[\"widths\"])\n print(properties[\"prominences\"])\n tot_width = sum(1 / np.array(properties[\"widths\"]))\n # a list of weights of each peak based on their widths, normalized to the number of steps of the refined path\n weights = [int(1 / tmp_width / tot_width * n_steps) for tmp_width in properties[\"widths\"]]\n # left limit of each peak, in same coordinates as input steps\n left_lims = path_steps[[int(tmp_lim) for tmp_lim in peaks - 2 * (properties[\"widths\"] / 2)], :]\n # right limit of each peak, in same coordinates as input steps\n right_lims = path_steps[[int(tmp_lim) for tmp_lim in peaks + 2 * (properties[\"widths\"] / 2)], :]\n # creating the refined path steps\n new_path_steps_h = []\n new_path_steps_v = []\n for i, peak in enumerate(peaks):\n tmp_steps_h =np.linspace(left_lims[i,0], right_lims[i,0],num=weights[i],endpoint=True)\n tmp_steps_v =np.linspace(left_lims[i,1], right_lims[i,1],num=weights[i],endpoint=True)\n new_path_steps_h = np.append(new_path_steps_h, tmp_steps_h)\n new_path_steps_v = np.append(new_path_steps_v, tmp_steps_v)\n 
return np.vstack((new_path_steps_h,new_path_steps_v)).T # to yield an (n,2) array", "def interpolate_subspec(wls, fls, prev_index, gap_ind, wl_step):\n # Get the subspectrum\n sub_spec_wls = wls[prev_index:gap_ind+1]\n sub_spec_fls = fls[prev_index:gap_ind+1]\n # Interpolate onto the new grid, using linear interpolation.\n interp_f = interp1d(sub_spec_wls, sub_spec_fls, kind=\"linear\")\n\n # Calculate the number of linear wavelength steps needed.\n min_wl = min(sub_spec_wls)\n max_wl = max(sub_spec_wls)\n n_steps = math.ceil((max_wl - min_wl) / wl_step)\n # Try a couple step sizes to get as close to the ideal size as possible.\n new_wls1, step_size1 = numpy.linspace(min_wl, max_wl, n_steps,\n retstep=True)\n new_wls2, step_size2 = numpy.linspace(min_wl, max_wl, n_steps+1,\n retstep=True)\n new_wls3, step_size3 = numpy.linspace(min_wl, max_wl, n_steps-1,\n retstep=True)\n # Choose the linear step size closest to our desired step size.\n diffs = [abs(x-wl_step) for x in [step_size1, step_size2, step_size3]]\n if diffs[0] <= diffs[1] and diffs[0] <= diffs[2]:\n new_wls = new_wls1\n elif diffs[1] <= diffs[2] and diffs[1] <= diffs[0]:\n new_wls = new_wls2\n else:\n new_wls = new_wls3\n # Calculate the interpolated values and extend the spectrum with them.\n return (list(new_wls), list(interp_f(new_wls)))", "def extend(x_end, y_end, length):\n\n end_len = len(x_end)\n period = (x_end[-1] - x_end[0])/2\n dx = (x_end[-1] - x_end[0])/(end_len - 1)\n extnum = length + end_len\n x_ext = np.linspace(x_end[0], x_end[0] + extnum*dx, num=extnum,\n endpoint=False)\n\n weights = np.exp(-2*(x_end-x_end[0])/period)\n\n\n def hill(t, K, start=0, finish=1, n=3):\n \"\"\" Ensure a smooth transition from the original trajectory to\n the fitted extension. Should be start at t=0, finish at t=t[-1]\n \"\"\"\n\n hill_term = start + (finish-start)*(t**n/(K**n + t**n))\n offset = (hill_term[-1]/finish)\n hill_term *= 1/offset\n return hill_term\n\n try:\n end_pars, end_pcov = fit_decaying_sinusoid(x_end, y_end,\n weights=weights)\n \n # Sharper transition from y_end to y_fit\n merge = hill(x_end, x_end[-1] - period/2, n=100)\n\n except RuntimeError:\n # Error with the fitting.\n weights = np.exp(-1*(x_end-x_end[0])/period)[::-1]\n end_pars, end_pcov = fit_decaying_sinusoid(x_end, y_end,\n weights=weights)\n \n # Smoother transition from y_end to y_fit\n merge = hill(x_end, x_end[-1] - period/2, n=20)\n \n\n y_fit = decaying_sinusoid(x_ext, *_pars_to_plist(end_pars))\n\n\n y_ext = np.zeros(*x_ext.shape)\n y_ext[:end_len] += y_end * (1 - merge)\n y_ext[:end_len] += y_fit[:end_len] * (merge)\n y_ext[end_len:] = y_fit[end_len:]\n\n return x_ext, y_ext", "def _LinearWave(Edge1PixNo,Edge1Height,Edge2PixNo,Edge2Height):\n itt=0\n NewString=''\n if Edge1Height>65535:\n print('Edge1 height exceeds max value of 65535')\n h1=65535\n elif Edge1Height<0:\n print('Edge1 height must be positive')\n h1=0\n else:\n h1=int(Edge1Height)\n if Edge2Height>65535:\n print('Edge2 height exceeds max value of 65535')\n h2=65535\n elif Edge2Height<0:\n print('Edge2 height must be positive')\n h2=0\n else:\n h2=int(Edge2Height)\n #\n if Edge1PixNo>Edge2PixNo:\n print('Edge1 must come before Edge2')\n Dummy=int(Edge1PixNo)\n Edge1PixNo=int(Edge2PixNo)\n Edge2PixNo=Dummy\n if Edge1PixNo<1:\n print('Edge1 pixel number must be >=1')\n p1=0\n elif Edge1PixNo>140:\n print('Edge1 pixel number must be <=140')\n p1=139\n else:\n p1=int(Edge1PixNo)-1\n if Edge2PixNo<1:\n print('Edge2 pixel number must be >=1')\n p2=0\n elif Edge2PixNo>140:\n print('Edge2 
pixel number must be <=140')\n p2=139\n else:\n p2=int(Edge2PixNo)-1\n #\n if p1==p2:\n print('Warning: pulse width specified as single pixel.')\n return 140*[0]\n #\n while itt<140:\n if itt<p1:\n NewString+='0000'\n elif p1<=itt<=p2:\n NewString+=HAWG._Hex2Byte(int(h2+((itt-p2)*(h2-h1)/float(p2-p1))))\n else:\n NewString+='0000'\n itt+=1\n return NewString", "def interpolation_2d(self, source, length):\n\n # placeholder for result data\n new_source = np.empty([length, source.shape[-1]], dtype=np.float32)\n # apply 1D interpolation along the channel axis\n for i in range(source.shape[-1]):\n new_source[:, i] = self.npinterp(source[:, i], length)\n return new_source", "def test_interpolate(p1: Tensor, p2: Tensor, size: int) -> None:\n points = torch.stack((p1, p2))[None]\n linear_interpolation(points, size)", "def test_interpolate_size():\n\n size = (10, 12)\n factors = (0.5, 1, 2)\n\n for factor in factors:\n expected_size = tuple(factor*x for x in size)\n transform = sumie.transforms.Interpolate(factor)\n image = torch.randn(1, 3, *size)\n out = transform(image)\n\n assert out.size() == (1, 3) + expected_size", "def lin_interp(x,x0,x1,y0,y1):\r\n \r\n return y0+(y1-y0)*(x-x0)/(x1-x0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather a list of EMData objects from all nodes to the main node; we assume the list has the same length on each node.
def gather_EMData(data, number_of_proc, myid, main_node):
	from mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB
	from mpi import mpi_send, mpi_recv
	l = len(data)
	gathered_data = []
	inc = 1  # A temp measure
	if myid == main_node:
		for i in xrange(0, number_of_proc*inc, inc):
			if i == main_node:
				for k in xrange(l):
					gathered_data.append(data[k])
			else:
				for k in xrange(l):
					im = recv_EMData(i, i*l+k)
					mem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = map(int, members)
					im.set_attr('members', members)
					gathered_data.append(im)
	else:
		for k in xrange(l):
			send_EMData(data[k], main_node, myid*l+k)
			mem = data[k].get_attr('members')
			mpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
	return gathered_data
[ "def items(self):\n # Create an empty list of results\n result = [] # Constant time to create a new list\n # Start at the head node\n node = self.head # Constant time to assign a variable reference\n # Loop until the node is None, which is one node too far past the tail\n while node is not None: # Always n iterations because no early exit\n # Append this node's data to the results list\n result.append(node.data) # Constant time to append to a list\n # Skip to the next node\n node = node.next # Constant time to reassign a variable\n # Now result contains the data from all nodes\n return result # Constant time to return a list", "def items(self):\n # Create an empty list of results\n result = [] # Constant time to create a new list\n # Start at the head node\n node = self.head # Constant time to assign a variable reference\n # node.previous = None\n # Loop until the node is None, which is one node too far past the tail\n while node is not None: # Always n iterations because no early exit\n # Append this node's data to the results list\n result.append(node.data) # Constant time to append to a list\n # Skip to the next node\n node = node.next # Constant time to reassign a variable\n # Now result contains the data from all nodes\n return result # Constant time to return a list", "def collect(self, pds):\n\n #Send the data we have back to the master\n _ = self.comm.gather(pds.python_list, root=0)", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n if type(data) is torch.Tensor:\n data = data.cpu()\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.ByteTensor(size=(max_size,)).to(\"cuda\"))\n if local_size != max_size:\n padding = torch.ByteTensor(size=(max_size - local_size,)).to(\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data = pickle.loads(buffer)\n if type(data) is torch.Tensor:\n data = data.to(\"cuda\")\n data_list.append(data)\n\n return data_list", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n 
data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to('cuda')\n\n # obtain Tensor size of each rank\n local_size = torch.tensor([tensor.numel()], device='cuda')\n size_list = [torch.tensor([0], device='cuda') for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(\n torch.empty((max_size, ), dtype=torch.uint8, device='cuda'))\n if local_size != max_size:\n padding = torch.empty(\n size=(max_size - local_size, ), dtype=torch.uint8, device='cuda')\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def get_all_nodes(self):\n self.all_nodes = self.main_nodes + self.ref_nodes", "def get_node_data(self, nodes):\n labeled_data = [self[node].assign(node=node) for node in nodes]\n return pd.concat(labeled_data).sort_index()", "def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset", "def items(self):\n items = [] # O(1) time to create empty list\n # Start at head node\n node = self.head # O(1) time to assign new variable\n # Loop until node is None, which is one node too far past tail\n while node is not None: # Always n iterations because no early return\n items.append(node.data) # O(1) time (on average) to append to list\n # Skip to next node to advance forward in linked list\n node = node.next # O(1) time to reassign variable\n #print(node)\n # Now list contains items from all nodes\n return items # O(1) time to return list", "def get_data(source_data: list[list[float]]) -> list[list[float]]:\n data_lists: list[list[float]] = []\n for data in source_data:\n for i, el in enumerate(data):\n if len(data_lists) < i + 1:\n data_lists.append([])\n data_lists[i].append(float(el))\n return data_lists", "def eventlist():\n\n infile = conf[\"run_path_derived\"] + 'LOCALIZED.txt'\n\n data = np.genfromtxt(infile, skip_header=1) \n\n mlt = cx.MAGtoMLT(data[:, 5], data[:, 0:5])\n\n # Swap mlat and mlon colums so in expected order (lat then long)\n data[:, [6,5]] = data[:, [5,6]]\n \n data = np.hstack((data, np.reshape(mlt, (mlt.shape[0], 1))))\n \n return data", "def __get_nodes_data(self):\n return self.__repo.get_all()", "def add_node_pmids(self):\n self.response.debug(f\"Adding node PMIDs\")\n self.response.info(f\"Adding pubmed ID's to nodes based on occurrence in PubMed abstracts\")\n self.response.warning(f\"Utilizing API calls to NCBI eUtils, so this may take a while...\")\n name = \"pubmed_ids\"\n type = \"EDAM:data_0971\"\n value = \"\"\n url = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils\"\n\n # iterate over KG edges, add the information\n try:\n for key, node in self.message.knowledge_graph.nodes.items():\n # Make sure the attributes are not None\n 
if not node.attributes:\n node.attributes = [] # should be an array, but why not a list?\n # now go and actually get the NGD\n node_curie = key\n node_name = node.name\n pmids = NGD.get_pmids_for_all([node_curie], [node_name])[0] # since the function was designed for multiple inputs, but I only want the first\n\n if 'max_num' in self.parameters:\n pmids = pmids[0:self.parameters['max_num']]\n value = pmids\n ngd_edge_attribute = NodeAttribute(type=type, name=name, value=value, url=url) # populate the NGD edge attribute\n node.attributes.append(ngd_edge_attribute) # append it to the list of attributes\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong adding the PubMed ID attributes\")\n else:\n self.response.info(f\"PubMed ID's successfully added to nodes\")\n\n return self.response", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "def getComponentEMobjects(self):\n\n em_list = [] \n \n for p in range(len(self.p)):\n mask = (self.readassignments == p)\n EM = self.constructEM(self.EM.reads[mask, :], self.EM.mutations[mask, :])\n em_list.append(EM)\n\n return em_list", "def get_all_nodes(self):\n pass", "def items(self):\n items = []\n current = self.head\n while current != None:\n items.append(current.data)\n current = current.next\n return items" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write headers from files in data into a disk file called filename.
The filename has to be either hdf or bdb.
lima - list with positions in the disk files into which headers will be written,
i.e., header from data[k] will be written into file number lima[k]
def write_headers(filename, data, lima):
	from utilities import file_type
	from EMAN2db import db_open_dict
	ftp = file_type(filename)
	if ftp == "bdb":
		# For unknown reasons this does not work on Linux, but works on Mac ??? Really?
		DB = db_open_dict(filename)
		for i in range(len(lima)):
			DB.set_header(lima[i], data[i])
		DB.close()
		#for i in range(len(lima)):
		#	data[i].write_image(filename, lima[i])
	elif ftp == "hdf":
		for i in range(len(lima)):
			data[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)
	else:
		ERROR("Unacceptable file format","write_headers",1)
[ "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writeHeader(cls, f):\n s = []\n s.append(\"# basename is the base file name of the data files.\")\n s.append(\"# modelbase is the base file name of the dist, fmod, pmode files corresponding to the data files\")\n s.append(\"# $Id:$\")\n s.append(\"#\")\n s.append(\"# basename modelbase ditherx dithery seeing norm airmass\")\n s.append(\"#\")\n f.write('\\n'.join(s) + \"\\n\")", "def get_headers(files, outpath):\n for path in files:\n name = ospath.basename(path)\n hdul = fits.open(path)\n nhdu = fits.PrimaryHDU(header=hdul[0].header)\n nhdul = fits.HDUList([nhdu])\n nhdul.writeto(ospath.join(outpath, name))", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def writeFitsHeader(filename, model, inc, pa, azi):\n data, header = fits.getdata(filename, header=True)\n header['DISTANCE'] = model.distance, 'Distance in parsec.'\n header['CHEMMOD'] = model.header.fn.split('/')[-1], 'Chemical model used.'\n header['INC'] = inc, 'Inclianation in radians.'\n header['PA'] = pa, 'Position angle in radians.'\n header['AZI'] = azi, 'Azimuthal angle in radians.'\n header['NMODELS'] = model.nmodels, 'Number of models averaged.'\n header['NAXIS3'] = data.shape[0]\n header['CDELT3'] = model.velres\n header['CRPIX3'] = (data.shape[0] + 1.) 
/ 2.\n fits.writeto(filename, data, header, overwrite=True)\n return", "def write_headers(outfile, header):\n from struct import pack\n\n outfile.write(pack('fff',\n header['numnodes'],\n header['numdims'],\n header['numtimesteps']\n )\n )", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def create_header_file(name, tensor_name, tensor_data, output_path):\n file_path = pathlib.Path(f\"{output_path}/\" + name).resolve()\n # Create header file with npy_data as a C array\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\n \"\\n\"\n + f\"const size_t {tensor_name}_len = {tensor_data.size};\\n\"\n + f'__attribute__((section(\".data.tvm\"), aligned(16))) int8_t {tensor_name}[] = \"'\n )\n\n data_hexstr = tensor_data.tobytes().hex()\n for i in range(0, len(data_hexstr), 2):\n header_file.write(f\"\\\\x{data_hexstr[i:i+2]}\")\n header_file.write('\";\\n\\n')", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has 
different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def generate_header(data, outfile):\n ts_count = count_timesteps(outfile.name)\n header = {'numnodes': len(data),\n 'numdims': 4,\n 'numtimesteps': ts_count\n }\n\n return header", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def _writeHeaders(self):\n data = compress(\"\".join(\n uint32.pack(len(key)) + key\n + uint32.pack(len(value)) + value\n for (key, value) in self.headers))\n self.output.write(\"\".join([\n magicMarker,\n uint32.pack(len(data)),\n uint32.pack(crc32(data) & 0xffffffff),\n data,\n uint32.pack(eohMarker)]))\n self.headersWritten = True", "def make_headers(self):\n f = open(rospack.get_path('e190_bot')+\"/data/\"+self.file_name, 'a+')\n f.write('{0} {1:^1} {2:^1} {3:^1} {4:^1} {5:^1} \\n'.format('TIME','X','Y','LIR','CIR','RIR'))\n f.close()", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def write_headers(self, header):\n self.mb_dir_path.mkdir(parents=True, exist_ok=True)\n for i in range(self.n_boxes):\n mb_path = self.path_to_mailbox(i)\n with mb_path.open(\"w\") as fh:\n fh.write(header)", "def _reportDataFile(self, dataFileName, outputFile):\n #subsequent access to the file should be open for \"append\"-ing\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\" ' +AutoGrader.Const.HEADER_COLOR2 + '\"><br>\\n------------- ' + os.path.split(dataFileName)[1] + ' -------------</font>\\n')\n f.close()", "def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()", "def write_header(dot_H_code_filename):\n\n filename = dot_H_code_filename.replace(\".\", \"_\")\n filename = filename.upper()\n filename = \"__\" + filename + \"_\"\n\n first_str = \"\"\"\n#pragma once\n\n\n#ifndef \"\"\"\n\n second_str = \"#define \"\n\n third_str = \"\"\"\n \n#include <string>\n#include <vector>\n#include <map>\n\n//using namespace 
std;\n\n\"\"\"\n\n d_header= \"\".join((first_str,\n filename,\n \"\\n\",\n second_str,\n filename,\n \"\\n\",\n third_str) )\n return d_header", "def writeFitsHeader(self, filename=None, extension=0):\n\n if filename is None:\n f_out = self.origin\n else:\n f_out = filename\n hdu = fits.open(f_out, mode='update')\n hdu[extension].header = self.header\n hdu[extension].update_header()\n hdu.flush()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write the header from a single image (data) into a disk file called filename. The filename has to be either hdf or bdb. lima is the position in the disk file into which the header will be written, i.e., the header from data will be written into image number lima
def write_header(filename, data, lima):
    from utilities import file_type
    from EMAN2db import db_open_dict

    ftp = file_type(filename)
    if ftp == "bdb":
        DB = db_open_dict(filename)
        DB.set_header(lima, data)
    elif ftp == "hdf":
        data.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)
    else:
        ERROR("Unacceptable file format","write_headers",1)
[ "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writeHeader(cls, f):\n s = []\n s.append(\"# basename is the base file name of the data files.\")\n s.append(\"# modelbase is the base file name of the dist, fmod, pmode files corresponding to the data files\")\n s.append(\"# $Id:$\")\n s.append(\"#\")\n s.append(\"# basename modelbase ditherx dithery seeing norm airmass\")\n s.append(\"#\")\n f.write('\\n'.join(s) + \"\\n\")", "def writeFitsHeader(filename, model, inc, pa, azi):\n data, header = fits.getdata(filename, header=True)\n header['DISTANCE'] = model.distance, 'Distance in parsec.'\n header['CHEMMOD'] = model.header.fn.split('/')[-1], 'Chemical model used.'\n header['INC'] = inc, 'Inclianation in radians.'\n header['PA'] = pa, 'Position angle in radians.'\n header['AZI'] = azi, 'Azimuthal angle in radians.'\n header['NMODELS'] = model.nmodels, 'Number of models averaged.'\n header['NAXIS3'] = data.shape[0]\n header['CDELT3'] = model.velres\n header['CRPIX3'] = (data.shape[0] + 1.) / 2.\n fits.writeto(filename, data, header, overwrite=True)\n return", "def write_headers(outfile, header):\n from struct import pack\n\n outfile.write(pack('fff',\n header['numnodes'],\n header['numdims'],\n header['numtimesteps']\n )\n )", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def writeFitsHeader(self, filename=None, extension=0):\n\n if filename is None:\n f_out = self.origin\n else:\n f_out = filename\n hdu = fits.open(f_out, mode='update')\n hdu[extension].header = self.header\n hdu[extension].update_header()\n hdu.flush()", "def write_header(self, filename):\r\n with open(filename, 'a', encoding='utf-8') as f:\r\n f.write(\"\\tSource port: {} Destination port: {}\\n\".format(self.src_port, self.dest_port))\r\n f.write(\"\\t\\tHeader size: {} Checksum: {}\\n\\n\".format(self.length, self.checksum))", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n 
header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def _write_header(file):\n file.write(FILE_HEADER.format(datetime.utcnow()))", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def write_header(self):\n if self.report_writer is not None:\n self.report_writer.write(\"header structure \\n\")\n magic_number_encoded = bytearray(\n struct.pack(\"<I\", constants.APPDATA_MAGIC_NUM))\n\n if self.report_writer is not None:\n self.report_writer.write(\n \"{} Magic number - file identifier: {} \\n\".format(\n self.mem_writer.tell(), constants.APPDATA_MAGIC_NUM))\n self.mem_writer.write(magic_number_encoded)\n\n version_encoded = bytearray(\n struct.pack(\"<I\", constants.DSE_VERSION))\n if self.report_writer is not None:\n self.report_writer.write(\n \"{} File structure version: {} \\n\".format(\n self.mem_writer.tell(), constants.DSE_VERSION))\n self.mem_writer.write(version_encoded)\n\n self.space_used = 0", "def writeHeader(cls, f):\n s = []\n s.append(\"# global color=green dashlist=8 3 width=1 font=\\\"helvetica 8 normal roman\\\" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\")\n s.append(\"fk5\")\n f.write('\\n'.join(s) + \"\\n\")", "def _write_header(self, out_handle):\n out_handle.write(\"##gff-version 3\\n\")", "def write_sff_header(header, fh, num=None):\r\n\r\n lines = [\"Common Header:\"]\r\n if (num is not None):\r\n header[\"# of Flows\"] = num\r\n\r\n lines.extend([\" %s:\\t%s\" % (param, header[param])\r\n for param in header])\r\n fh.write(\"\\n\".join(lines) + \"\\n\\n\")", "def create_header_file(name, tensor_name, tensor_data, output_path):\n file_path = pathlib.Path(f\"{output_path}/\" + name).resolve()\n # Create header file with npy_data as a C array\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\n \"\\n\"\n + f\"const size_t {tensor_name}_len = {tensor_data.size};\\n\"\n + f'__attribute__((section(\".data.tvm\"), aligned(16))) int8_t {tensor_name}[] = \"'\n )\n\n data_hexstr = tensor_data.tobytes().hex()\n for i in range(0, len(data_hexstr), 2):\n header_file.write(f\"\\\\x{data_hexstr[i:i+2]}\")\n header_file.write('\";\\n\\n')", "def write_header(fpath, 
header):\n\n with open(fpath, 'r+') as f:\n content = f.read()\n # check if there is a shebang and encoding\n if content[:45] == '#!/usr/bin/env python\\n# -*- coding: utf-8 -*-':\n f.seek(46, 0)\n f.write('\\n' + header + content[46:])\n # check if there is only a shebang\n elif content[:21] == '#!/usr/bin/env python':\n f.seek(22, 0)\n f.write('\\n' + header + content[22:])\n # no shebang or encoding\n else:\n f.seek(0, 0)\n f.write(header + content)", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def write_header(dot_H_code_filename):\n\n filename = dot_H_code_filename.replace(\".\", \"_\")\n filename = filename.upper()\n filename = \"__\" + filename + \"_\"\n\n first_str = \"\"\"\n#pragma once\n\n\n#ifndef \"\"\"\n\n second_str = \"#define \"\n\n third_str = \"\"\"\n \n#include <string>\n#include <vector>\n#include <map>\n\n//using namespace std;\n\n\"\"\"\n\n d_header= \"\".join((first_str,\n filename,\n \"\\n\",\n second_str,\n filename,\n \"\\n\",\n third_str) )\n return d_header" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
retrieve 3D alignment parameters from the header: phi, theta, psi, tx, ty, tz, mirror, scale
def get_params3D(ima, xform = "xform.align3d"):
    t = ima.get_attr(xform)
    d = t.get_params("spider")
    return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["mirror"],d["scale"]
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def hdifferentZ(header, nz_fin, crpix3=1, crval3=1, cunit3='#', cdelt3=1,\n ctype3='cube index'):\n hplane=header\n hplane['NAXIS3'] = nz_fin\n hplane['CRPIX3'] = crpix3\n hplane['CRVAL3'] = crval3\n hplane['CUNIT3'] = cunit3\n hplane['CDELT3'] = cdelt3\n hplane['CD1_3'] = 0.\n hplane['CD2_3'] = 0.\n hplane['CD3_3'] = cdelt3\n hplane['CD3_1'] = 0.\n hplane['CD3_2'] = 0.\n hplane['CTYPE3'] = ctype3\n\n return hplane", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def get_harr_random_u3_params():\n decomp = qiskit.quantum_info.synthesis.OneQubitEulerDecomposer(basis='U3')\n haar_random = qiskit.quantum_info.random_unitary(2).data\n\n return decomp.angles(haar_random)", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = 
a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def get_parameters(fname):\r\n missing = np.nan\r\n c3d = ezc3d.c3d(fname).c3d_swig\r\n c = c3d.parameters().group\r\n units_all = {'Point' : c('POINT').parameter('UNITS').valuesAsString()[0],\r\n 'Mass': 'kg', 'Length': 'm', 'Time': 's', 'g': 9.80665}\r\n if c('POINT').isParameter('ANGLE_UNITS') and c('POINT').isParameter('FORCE_UNITS'):\r\n units_all.update({'Angle' : c('POINT').parameter('ANGLE_UNITS').valuesAsString()[0],\r\n 'Force' : c('POINT').parameter('FORCE_UNITS').valuesAsString()[0],\r\n 'Moment' : c('POINT').parameter('MOMENT_UNITS').valuesAsString()[0],\r\n 'Power' : c('POINT').parameter('POWER_UNITS').valuesAsString()[0]\r\n })\r\n else:\r\n units_all.update({'Angle' : '', 'Force' : '',\r\n 'Moment' : '', 'Power' : ''})\r\n print('{} does not have ANGLE_UNITS.'.format(fname))\r\n if units_all['Point'] == 'cm':\r\n scale = .01\r\n elif units_all['Point'] == 'mm':\r\n scale = .001\r\n else:\r\n scale = 1\r\n units_all['scale'] = scale\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('UNITS')):\r\n units_all.update(dict(zip(c('ANALYSIS').parameter('NAMES').\r\n valuesAsString(),\r\n c('ANALYSIS').parameter('UNITS').\r\n valuesAsString())))\r\n else:\r\n #print('{} does not have ANALYSIS.'.format(fname))\r\n pass\r\n LL, FL = {'L': np.nan, 'R': np.nan}, {'L': np.nan, 'R': np.nan}\r\n if c3d.parameters().isGroup('PROCESSING'):\r\n if c('PROCESSING').isParameter('Bodymass'):\r\n mass = np.round(c('PROCESSING').parameter('Bodymass').\r\n valuesAsDouble()[0], 3)\r\n if c('PROCESSING').isParameter('Height'):\r\n height = np.round(c('PROCESSING').parameter('Height').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['L'] = 
np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LLegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['L'] = np.round(c('PROCESSING').parameter('LegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if (c('PROCESSING').isParameter('UpperLegLength') and\r\n c('PROCESSING').isParameter('LowerLegLength')):\r\n LL['R'] = np.round((c('PROCESSING').parameter('UpperLegLength').\r\n valuesAsDouble()[0] +\r\n c('PROCESSING').parameter('LowerLegLength').\r\n valuesAsDouble()[0])*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('RLegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('RLegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('LegLength'):\r\n LL['R'] = np.round(c('PROCESSING').parameter('LegLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('LFootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('LFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['L'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n if c('PROCESSING').isParameter('RFootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('RFootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n elif c('PROCESSING').isParameter('FootLength'):\r\n FL['R'] = np.round(c('PROCESSING').parameter('FootLength').\r\n valuesAsDouble()[0]*units_all['scale'], 3)\r\n else:\r\n mass, height = np.nan, np.nan\r\n\r\n rates = {'p': c3d.header().frameRate(),\r\n 'a': c3d.header().frameRate() * c3d.header().nbAnalogByFrame()}\r\n frames = {'p': [c3d.header().firstFrame(), c3d.header().lastFrame()],\r\n 'a': [c3d.header().firstFrame() * c3d.header().nbAnalogByFrame(),\r\n c3d.header().lastFrame() * c3d.header().nbAnalogByFrame()]}\r\n\r\n events = get_events(fname, missing=missing)\r\n\r\n param = {'filename': os.path.splitext(os.path.basename(fname))[0],\r\n 'mass': mass, 'height': height, 'LL': LL, 'FL': FL,\r\n 'units_all': units_all,\r\n 'rates': rates, 'frames': frames, 'events': events}\r\n if (c3d.parameters().isGroup('ANALYSIS') and\r\n c('ANALYSIS').isParameter('NAMES') and\r\n c('ANALYSIS').isParameter('VALUES')):\r\n param.update(dict(zip(c('ANALYSIS').parameter('NAMES').valuesAsString(),\r\n np.round(c('ANALYSIS').parameter('VALUES').\r\n valuesAsDouble(), 3))))\r\n\r\n return param", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis 
is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def extract3d(xaxis, yaxis, zaxis, dat3d, crd_sys, xvec,yvec, zvec, pad=0.):\n func = RegularGridInterpolator((xaxis, yaxis, zaxis), dat3d, \n method='linear', bounds_error=False, fill_value=pad)\n\n # convert x,y,z coordinates to spherical coordinates\n if crd_sys == 'car':\n profx = xvec\n profy = yvec\n profz = zvec\n elif crd_sys == 'sph':\n # radius\n profx = np.sqrt(xvec**2 + yvec**2 + zvec**2)\n\n # theta\n tvec = np.arctan2(zvec, np.sqrt(xvec**2 + yvec**2))\n reg = tvec < 0.\n tvec[reg] = tvec[reg] + 2.*np.pi\n profy = tvec\n\n # azimuth\n pvec = np.arctan2(yvec, xvec)\n reg = pvec < 0\n pvec[reg] = pvec[reg] + 2*np.pi\n profz = pvec\n\n nvec = len(xvec)\n prof = np.zeros([nvec], dtype=np.float64)\n for ii in range(nvec):\n prof[ii] = func([profx[ii], profy[ii], profz[ii]])\n\n return prof", "def _get_quaternion_data(self, msg):\n alpha, beta, gamma = PIDController.get_euler_angle_from_quat(msg.quaternion.w, msg.quaternion.x,\n msg.quaternion.y, msg.quaternion.z)\n self._actual_euler[\"alpha\"], self._actual_euler[\"beta\"], self._actual_euler[\"gamma\"] \\\n = alpha, beta, gamma", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def _transform3D(self, xyzhe):\n\n theta_x = -math.pi/2.0 + xyzhe[:,4] # elevation\n cx = torch.cos(theta_x)\n sx = torch.sin(theta_x)\n\n theta_z = -xyzhe[:,3] # heading\n cz = torch.cos(theta_z)\n sz = torch.sin(theta_z)\n\n T = torch.zeros(xyzhe.shape[0], 4, 4, device=self.args.device)\n T[:,0,0] = cz\n T[:,0,1] = -sz*cx\n T[:,0,2] = sz*sx\n T[:,0,3] = xyzhe[:,0] # x\n\n T[:,1,0] = sz\n T[:,1,1] = cz*cx\n T[:,1,2] = -cz*sx\n T[:,1,3] = xyzhe[:,1] # y\n\n T[:,2,0] = 0\n T[:,2,1] = sx\n T[:,2,2] = cx\n T[:,2,3] = xyzhe[:,2] # z\n\n T[:,3,3] = 1\n return T" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set 3D alignment parameters in the header: phi, theta, psi, tx, ty, tz, mirror, scale
def set_params3D(ima, p, xform = "xform.align3d"):
    t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2],"tx":p[3],"ty":p[4],"tz":p[5],"mirror":p[6],"scale":p[7]})
    ima.set_attr(xform, t)
[ "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def setOptimizableVariables(self, TiltAlignmentParameters_, optimizableVariables):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n if self.optimizeMarkerPositions:\n # translation\n nopti += (ntilt) * 2\n\n # variable magnifications for projections, exclude scaling of reference image (S==1)\n if TiltAlignmentParameters_.dmag:\n nopti += ntilt - 1\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti += ntilt\n else:\n nopti += 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti += 1\n\n # nopti += ntilt\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n # check that number of variables is ok\n if len(optimizableVariables) != nopti:\n print(\"Length optimizableVariables: \" + str(len(optimizableVariables)))\n print(\"N optmization: \" + str(nopti))\n raise IndexError('length of optimizableVariables does not match TiltAlignmentParameters')\n\n # marker 3D coords\n ivar = 0\n\n\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = numpy.array([optimizableVariables[ivar],\n optimizableVariables[ivar + 1], optimizableVariables[ivar + 2]])\n self._Markers[imark].set_r(r)\n\n ivar = ivar + 3\n\n\n if self.optimizeMarkerPositions:\n # translations\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #FFif (self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt):\n self._alignmentTransX[itilt] = optimizableVariables[ivar]\n self._alignmentTransY[itilt] = optimizableVariables[ivar + 1]\n ivar = ivar + 2\n\n\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if (int(self._projIndices[itilt]) != int(self._projIndices[self.ireftilt])):\n self._alignmentMagnifications[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # image rotations\n if 
TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n self._alignmentRotations[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n # all rotations are the same - take the first one\n else:\n self._alignmentRotations[0] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n self._alignmentBeamTilt = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n\n if not self.optimizeMarkerPositions:\n from pytom.scripts.Rotation_function import calculate_translation\n\n\n # r_model is the modelled x,y,z coordinate of the reference marker\n r_model = self._Markers[self.irefmark].get_r()\n\n # if using a reduced set using an indices existing in the reduced set\n # i = int(numpy.argwhere(self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0])\n psi_ref = numpy.deg2rad(numpy.mean(self._alignmentRotations) + 90)\n\n for iproj in range(0,ntilt):\n # setting variables\n marker = self._Markers[self.irefmark]\n r_exp_tilt = numpy.array([marker.get_xProj(iproj), marker.get_yProj(iproj)]) - numpy.array(\n self.TiltSeries_._TiltAlignmentParas.cent)\n psi_itilt = numpy.deg2rad(self._alignmentRotations[iproj] + 90)\n theta_itilt = numpy.deg2rad(self._tiltAngles[iproj])\n magnification =self._alignmentMagnifications[iproj]\n\n # calculating translation setting difference model and experimental reference marker point at 0\n tx, ty = calculate_translation(r_model, r_exp_tilt, psi_ref, psi_itilt, theta_itilt, magnification)\n\n\n self._alignmentTransX[iproj] = tx\n self._alignmentTransY[iproj] = ty\n\n\n\n # print(self.irefmark, self._alignmentTransX[self.ireftilt], self._alignmentTransY[self.ireftilt])\n # for itilt in range(ntilt):\n # self.q[itilt] = optimizableVariables[ivar]\n # ivar += 1", "def setViewAngle3D(ang):\n dislin.vang3d(ang)", "def _parametric(self):\n\t\tl, b = self.length, self.breadth\n\t\tself.track_length = 2*l + 2*b\n\t\tself.center_line = np.array([\n\t\t\t[0, l/2, l/2, -l/2, -l/2], \n\t\t\t[-b/2, -b/2, b/2, b/2, -b/2]\n\t\t\t])\n\t\tself.theta_track = np.array([0, l/2, l/2+b, l/2+l+b, l/2+l+2*b])", "def hdifferentZ(header, nz_fin, crpix3=1, crval3=1, cunit3='#', cdelt3=1,\n ctype3='cube index'):\n hplane=header\n hplane['NAXIS3'] = nz_fin\n hplane['CRPIX3'] = crpix3\n hplane['CRVAL3'] = crval3\n hplane['CUNIT3'] = cunit3\n hplane['CDELT3'] = cdelt3\n hplane['CD1_3'] = 0.\n hplane['CD2_3'] = 0.\n hplane['CD3_3'] = cdelt3\n hplane['CD3_1'] = 0.\n hplane['CD3_2'] = 0.\n hplane['CTYPE3'] = ctype3\n\n return hplane", "def _set_orientation(self, orientation):\n self.x = orientation['x']\n self.y = orientation['y']\n self.z = orientation['z']\n self.a = orientation['a']\n self.b = orientation['b']\n self.g = orientation['g']\n return", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n 
orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def set_all_vert_theta(self):\n for i in range(len(self.sample_locs)):\n self.sample_locs[i]['theta'] = self.theta.position\n self.sample_locs[i]['plate_x'] = self.plate_x.position\n self.sample_locs[i]['plate_y'] = self.plate_y.position\n self.sample_locs[i]['stage_z'] = self.stage_z.position", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def changeReferenceTransposePosition3D(*args):\n return _almathswig.changeReferenceTransposePosition3D(*args)", "def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)", "def set_parameters(self):\n \n self.h_kappa = np.zeros(self.num_neurons, self.dtype) + 1.0\n \n self.h_delta = np.zeros(self.num_neurons, self.dtype) + 0.03\n \n self.h_bias = np.zeros(self.num_neurons, self.dtype) + 0.8\n self.h_sigma = np.zeros(self.num_neurons, self.dtype)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
retrieve projection alignment parameters from the header: phi, theta, psi, s2x, s2y
def get_params_proj(ima, xform = "xform.projection"):
    t = ima.get_attr(xform)
    d = t.get_params("spider")
    return d["phi"],d["theta"],d["psi"],-d["tx"],-d["ty"]
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def parse_projection(self, header): # pragma: no cover\n pass", "def get_projection_from_qsa():\n projection_keys = request.args.get('projection', '')\n projection = {}\n if projection_keys:\n for k in projection_keys.split(','):\n projection[k] = 1\n\n return projection or None", "def stereographic_projection(phi_degree, psi_degree):\n psi_rad = psi_degree *np.pi/180\n psi_stereo = 2*np.tan(psi_rad/2)\n\n phi_rad = phi_degree *np.pi/180\n return phi_rad, psi_stereo", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def get_projection_params(fname):\n # Read strings from the region file\n with open(fname, 'r') as f:\n lines = f.readlines()\n wcs = lines[2][:-1] # Coordinate system TODO bad variable name\n if lines[0] != '# Region file format: DS9 version 4.1\\n':\n print 'Warning: potentially invalid region file!'\n print 'First line was: ' + lines[0]\n if wcs != 'fk5':\n raise Exception('Regions must be in sky (fk5) coordinates; got ' +\n wcs + 'instead')\n\n # Manipulate each string and save to list\n lines = filter(lambda x: '# projection' in x, lines[3:])\n projspecs = []\n for ln in lines:\n lnsplit = ln[2:-1].split(')') # Remove leading '#', trailing '\\n'\n if lnsplit[1] == '':\n r = lnsplit[0] + ')' # No optional arguments\n else:\n r = lnsplit[0] + ') #' + lnsplit[1] # Add octothorpe\n projspecs.append('%s; %s' % (wcs, r))\n\n return projspecs", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def __read_header(self):\n\n # These for loops are consuming a lot of energy !\n # optimise 
it...!\n\n print ('Reading header file...')\n fname = self.directory + '/SeisHeader_sem2d.hdr'\n data = pd.read_csv(fname, names=('dt','npts','nsta'), delim_whitespace=True, header=0, nrows=1)\n self.dt = data['dt'].values[0]\n self.npts = int(data['npts'].values[0])\n self.nsta = int(data['nsta'].values[0])\n # print (self.dt, self.npts, self.nsta)\n\n self.rcoord = np.zeros( (self.nsta, 2) )\n with open(fname, 'r') as f:\n data = pd.read_csv(fname, names=('x','z'), delim_whitespace=True, header=2, nrows=self.nsta)\n # print (data)\n self.rcoord[:,0] = ( data['x'].values )\n self.rcoord[:,1] = ( data['z'].values )\n \n try: \n line = pd.read_csv(fname, header=self.nsta+3, nrows=1, dtype=str, delim_whitespace=True)\n self.extra = line.columns[0]\n except:\n print ('--- No extra station ---')\n pass\n\n # to complete later...", "def find_parameters(f):\n \n lat_ts = 60.0 # Latitude of true scale\n\n # The number of grid cells\n Lm = len(f.dimensions['xi_rho'])-2\n Mm = len(f.dimensions['eta_rho'])-2\n #print \"Lm, Mm =\", Lm, Mm\n\n try:\n lon0 = f.variables['lon_rho'][0,0]\n lat0 = f.variables['lat_rho'][0,0]\n angle0 = f.variables['angle'][0,0]\n pm0 = f.variables['pm'][0,0]\n except KeyError as e:\n v = \"Missing essential variable %s in %s\" % (e, ROMSfile)\n raise KeyError(v)\n sys.exit(1)\n\n ylon = lon0 + angle0*deg\n ylon = round(ylon, 2)\n dx = 1.0/pm0 * (1 + np.sin(lat_ts*rad))/(1 + np.sin(lat0*rad))\n dx = round(dx, 2)\n\n # Make a grid mapping with origin at north pole\n gmap0 = gridmap.PolarStereographic(0.0, 0.0, dx, ylon, 1, 1)\n x, y = gmap0.ll2grid(lon0, lat0)\n xp = round(-x, 3)\n yp = round(-y, 3)\n\n return gridmap.PolarStereographic(xp, yp, dx, ylon, Lm, Mm)", "def __read_header(self):\n\n filename = self.directory + 'SeisHeader_sem2d.hdr'\n try :\n f = open(filename, 'r')\n except:\n msg = 'No Header file <SeisHeader_sem2d.hdr> in directory'\n print(msg)\n answer = input(\"Do you want to continue [Y/N] : \")\n if answer.upper() == 'Y':\n return\n else:\n sys.exit()\n\n f.readline()\n string = f.readline()\n header_line = string.rstrip(\" \").split()\n\n self.dt = float(header_line[0])\n self.npts = int(header_line[1])\n self.nsta = int(header_line[2])\n\n # Seismos\n f.readline()\n self.rcoord = np.zeros((self.nsta,2))\n for reciever in np.arange(self.nsta):\n string = f.readline()\n reciever_line = string.rstrip(\" \").split()\n # x-coord\n self.rcoord[reciever,0] = float(reciever_line[0])\n # z-coord\n self.rcoord[reciever,1] = float(reciever_line[1])\n\n #extra station\n try:\n xsta = int(f.readline())\n self.xsta = xsta\n f.readline()\n self.x_rcoord = np.zeros((xsta,2))\n\n for ex_reciever in range(xsta):\n xtra = f.readline()\n x_reciever_line = xtra.rstrip(\" \").split()\n self.x_rcoord[ex_reciever,0] = float(x_reciever_line[0])\n self.x_rcoord[ex_reciever,1] = float(x_reciever_line[0])\n except :\n print(\"No Extra recievers\")\n self.x_rcoord = None\n\n f.close()\n return self.dt, self.npts, self.nsta, self.rcoord, self.x_rcoord", "def get_phase_space(self, grid_flag):\n\n f = h5py.File(self.xs_path, 'r')\n self.N = f['paramdescrip']['NVALUE'].value # det maximum range Ni for each d_i\n phase_space = {}\n order = {}\n NPAR = f['paramdescrip']['NPAR'].value[0]\n for di in range(NPAR - 1):\n di_name = f['paramdescrip']['PARNAM'].value[di] # get names for dimensions. Starts at 0\n # get values for dimensions. Starts at 1. e.g. 
'BURNUP': array([ 0., 9.35253143, 18.70503998,..\n # Is saved as a np.array, of floats64 FORTRAN-contiguous\n phase_space[di_name] = np.array([float(val) for val in f['paramvaleurs'][\n 'pval %d' % (di + 1)].value], order='F')\n order[di] = di_name # e.g. '7': 'BURNUP'\n\n iso_aux = []\n # just concatenate those two\n for iso in f['contenu']['NOMISO'].value[:]:\n iso_aux.append(iso)\n for iso in f['contenu']['NOMMAC'].value[:]:\n iso_aux.append(iso)\n f.close()\n self.iso_A2 = iso_aux\n\n # USER IMPOSED: Non-independant variables set to [0].\n \"\"\"\n *Do not eliminate them, this will bring problems with the cartesin product later one\n *if instead of '[phase_space['PHASE'][0]]' (which is equal to 1) just '[1]' is written then np.where() does not recognize the value.\n\n This two problems rise from the decision of defining the 'space of interest' as a subset from the 'phase space' which in time is read directly from the H5F file. Later several comparisons are made between the two. The upside is the need for no explicit declaration of the phase-space thus minimizing chances of un-noticed error in domain assignation.\n \"\"\"\n if 'PHASE' in phase_space.keys():\n phase_space['PHASE'] = [phase_space['PHASE'][0]]\n if 'BURNUPstep' in phase_space.keys():\n phase_space['BURNUPstep'] = [phase_space['BURNUPstep'][0]]\n\n if grid_flag == 'SG': # major update required\n \"\"\"\n In contras to FG, the stored values in the concatenated SAPHYB file only considers different burnup steps, i.e a set of values [0, 500, 500, 100] are stored as [0, 500, 100]. Two posibilities remain, read the BURNUP value from the single XS files separatly or load a pickeled object with the phase space. The second option was implemented.\n \"\"\"\n with open(self.file_path + self.xs_folder + 'phase_space.pickle', 'rb') as handle:\n phase_space_pk = pickle.load(handle)\n phase_space_pk.pop('a')\n phase_space_pk.pop('d')\n phase_space_pk.pop('l')\n phase_space_pk.pop('BURNUP_evol')\n phase_space_pk.pop('BURNUP_steps')\n phase_space = phase_space_pk\n\n self.phase_space, self.order, self.d, self.NPAR = phase_space, order, len(order), NPAR", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir 
==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def ComputeGeometricParameters(self):\r\n # algebraic inner orinetation paramters\r\n x = self.__innerOrientationParameters\r\n tx = x['a0']\r\n ty = x['b0']\r\n tetha = np.arctan((x['b1'] / x['b2']))\r\n gamma = np.arctan((x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha))\r\n / (x['b1'] * np.sin(tetha) + x['b2'] * np.cos(tetha)))\r\n sx = x['a1'] * np.cos(tetha) - x['a2'] * np.sin(tetha)\r\n sy = (x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha)) / (np.sin(gamma))\r\n\r\n return {'translationX': tx, 'translationY': ty, 'rotationAngle': tetha,\r\n 'scaleFactorX': sx, 'scaleFactorY': sy, 'shearAngle': gamma}", "def get_probeLocs_calib_setup_cm(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54]\n y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54]\n z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def _PhenomPCalculateModelParameters(self, p):\n\n\n logger.info(\"p['m1'] = {0}\".format(p['m1']))\n logger.info(\"p['m2'] = {0}\".format(p['m2']))\n if p['m1'] < p['m2']:\n raise ValueError('m1 = {0}, m2 = {1}. 
Convention error, this function needs m1 > m2'.format(p['m1'], p['m2']))\n\n #check that the spin magnitude is <=1\n if norm([p['chi1x'], p['chi1y'], p['chi1z']]) > 1.:\n raise ValueError('chi1 has a magnitude > 1')\n if norm([p['chi2x'], p['chi2y'], p['chi2z']]) > 1.:\n raise ValueError('chi2 has a magnitude > 1')\n\n m1_2 = p['m1']**2.\n m2_2 = p['m2']**2.\n\n #we start out in the Lhat = zhat frame\n #and define the spin w.r.t this frame.\n #Then, we incline the orbital frame w.r.t to the z-axis\n #by the angle inc.\n #This is done by a rotation about the y-axis, so the y-components do not change\n #in LAL this step is done in XLALSimInspiralInitialConditionsPrecessingApproxs in LALSimInspiralSpinTaylor.c\n #But it's simple so I just do it in this function.\n\n logger.info(\"spins before rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n p['chi1x'], p['chi1z'] = self.ROTATEY(p['inclination'], p['chi1x'], p['chi1z'])\n p['chi2x'], p['chi2z'] = self.ROTATEY(p['inclination'], p['chi2x'], p['chi2z'])\n\n logger.info(\"spins after rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n\n #from this we construct the orbital angular momentum\n #Again, this is a rotation about the y-axis.\n lnhatx = sin(p['inclination'])\n lnhaty = 0.\n lnhatz = cos(p['inclination'])\n\n chip, chi1_l, chi2_l = chip_fun(p['m1'], p['m2'], p['chi1x'], p['chi1y'], p['chi1z'], p['chi2x'], p['chi2y'], p['chi2z'], lnhatx, lnhaty, lnhatz)\n\n #compute L, J0 and orientation angles\n piM = Constants.LAL_PI * p['M_sec']\n v_ref = (piM * p['fRef'])**(1./3.)\n\n #Use 2PN approximation for initial L\n #magnitude of L\n L0 = p['Mtot']**2. * PhenomPL2PN(v_ref, p['eta'])\n\n #compute initial J\n #NOTE: we the spins need to be dimensionfull\n Jx0 = L0 * lnhatx + p['chi1x']*m1_2 + p['chi2x']*m2_2\n Jy0 = L0 * lnhaty + p['chi1y']*m1_2 + p['chi2y']*m2_2\n Jz0 = L0 * lnhatz + p['chi1z']*m1_2 + p['chi2z']*m2_2\n J0 = norm( [ Jx0, Jy0, Jz0 ] )\n\n #Compute thetaJ, the angle between J0 and line of sight (z-direction)\n if (J0 < 1e-10):\n logger.warning(\"Warning: |J0| < 1e-10. Setting thetaJ = 0.\\n\")\n thetaJ = 0.\n else:\n thetaJ = arccos(Jz0 / J0)\n\n #phiJ, We only use this angle internally since it is degenerate with alpha0.\n #NOTE:\n #in C code\n #if (Jx0 < DBL_MIN && Jy0 < DBL_MIN)\n #I think the replacement is the same\n if (Jx0 <= 0. 
and Jy0 <= 0.):\n phiJ = 0.\n else:\n phiJ = arctan2(Jy0, Jx0) #Angle of J0 in the plane of the sky\n #NOTE: Compared to the similar code in SpinTaylorF2 we have defined phiJ as the angle between the positive\n #(rather than the negative) x-axis and the projection of J0, since this is a more natural definition of the angle.\n #We have also renamed the angle from psiJ to phiJ.\n\n #Rotate Lnhat back to frame where J is along z and the line of\n #sight in the Oxz plane with >0 projection in x, to figure out initial alpha\n #The rotation matrix is\n #{\n #{-cos(thetaJ)*cos(phiJ), -cos(thetaJ)*sin(phiJ), sin(thetaJ)},\n #{sin(phiJ), -cos(phiJ), 0},\n #{cos(phiJ)*sin(thetaJ), sin(thetaJ)*sin(phiJ),cos(thetaJ)}\n #}\n\n rotLx = -lnhatx*cos(thetaJ)*cos(phiJ) - lnhaty*cos(thetaJ)*sin(phiJ) + lnhatz*sin(thetaJ)\n rotLy = lnhatx*sin(phiJ) - lnhaty*cos(phiJ)\n if (rotLx == 0.0 and rotLy == 0.0):\n alpha0 = 0.0\n else:\n alpha0 = arctan2(rotLy, rotLx)\n\n logger.info(\"chi1_l = {0}, chi2_l = {1}, chip = {2}, thetaJ = {3}, alpha0 = {4},\".format(chi1_l, chi2_l, chip, thetaJ, alpha0))\n\n return {\"chi1_l\" : chi1_l, \"chi2_l\" : chi2_l, \"chip\": chip, \"thetaJ\" : thetaJ, \"alpha0\" : alpha0}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set projection alignment parameters in the header: phi, theta, psi, s2x, s2y
def set_params_proj(ima, p, xform = "xform.projection"):
    from EMAN2 import Vec2f
    t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2]})
    t.set_trans(Vec2f(-p[3], -p[4]))
    ima.set_attr(xform, t)
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def __pub_initial_position(self, x, y, theta):\n initpose = PoseWithCovarianceStamped()\n initpose.header.stamp = rospy.get_rostime()\n initpose.header.frame_id = \"map\"\n initpose.pose.pose.position.x = x\n initpose.pose.pose.position.y = y\n quaternion = self.__yaw_to_quat(theta)\n\n initpose.pose.pose.orientation.w = quaternion[0]\n initpose.pose.pose.orientation.x = quaternion[1]\n initpose.pose.pose.orientation.y = quaternion[2]\n initpose.pose.pose.orientation.z = quaternion[3]\n self.__initialpose_pub.publish(initpose)\n return", "def edit_header(self, header, key_stem, alt=''):\n if not self.singular:\n return # Can't do this for multiple coordinates\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n lon = np.fmod(self.longitude, self.two_pi)\n lat = self.latitude\n if lon < 0:\n lon += self.two_pi\n\n header[f'{key_stem}1{alt}'] = (\n lon.to('degree').value,\n \"The reference longitude coordinate (deg).\")\n\n header[f'{key_stem}2{alt}'] = (\n lat.to('degree').value,\n \"The reference latitude coordinate (deg).\")\n\n if alt != '':\n header['WCSAXES'] = 2, 'Number of celestial coordinate axes.'", "def test_sip_rot90(self):\n import astropy.wcs\n import astropy.io.fits\n import matplotlib.pyplot as plt\n \n # NIRCam header\n hdict = {'WCSAXES': 2,\n 'CRPIX1': 1023.898,\n 'CRPIX2': 1024.804,\n 'CD1_1': 1.1134906417684e-05,\n 'CD1_2': 1.3467824656321e-05,\n 'CD2_1': 1.3420087352956e-05,\n 'CD2_2': -1.1207451566394e-05,\n 'CDELT1': 1.0,\n 'CDELT2': 1.0,\n 'CUNIT1': 'deg',\n 'CUNIT2': 'deg',\n 'CTYPE1': 'RA---TAN-SIP',\n 'CTYPE2': 'DEC--TAN-SIP',\n 'CRVAL1': 214.96738973536,\n 'CRVAL2': 52.90902634452,\n 'LONPOLE': 180.0,\n 'LATPOLE': 52.90902634452,\n 'MJDREF': 0.0,\n 'RADESYS': 'ICRS',\n 'A_ORDER': 5,\n 'A_0_2': -1.5484928795603e-06,\n 'A_0_3': -8.2999183617139e-12,\n 'A_0_4': -5.7190213371208e-15,\n 'A_0_5': 4.07621451394597e-18,\n 'A_1_1': -1.1539811084739e-05,\n 'A_1_2': 1.5338981510392e-09,\n 'A_1_3': -5.0844995554426e-14,\n 'A_1_4': 7.75217206451159e-17,\n 'A_2_0': 1.90893037341374e-06,\n 'A_2_1': -9.4268503941122e-11,\n 'A_2_2': 3.36511405738904e-15,\n 'A_2_3': 1.01913003404162e-17,\n 'A_3_0': 1.43597235355948e-09,\n 'A_3_1': -4.6987250431983e-14,\n 'A_3_2': 1.57099611614835e-16,\n 'A_4_0': 1.02466627979516e-14,\n 'A_4_1': 8.67685711756312e-18,\n 'A_5_0': 8.38989007060477e-17,\n 
'B_ORDER': 5,\n 'B_0_2': -6.7240563576112e-06,\n 'B_0_3': 1.56045160756803e-09,\n 'B_0_4': -3.1953236128479e-14,\n 'B_0_5': 6.10311418213124e-17,\n 'B_1_1': 3.60929897034643e-06,\n 'B_1_2': -1.0268629323178e-10,\n 'B_1_3': 1.71406384407525e-14,\n 'B_1_4': 8.84616612338102e-18,\n 'B_2_0': 4.89531710581344e-06,\n 'B_2_1': 1.39303581747843e-09,\n 'B_2_2': -8.0509590680251e-15,\n 'B_2_3': 1.49127030961831e-16,\n 'B_3_0': -1.337273982495e-11,\n 'B_3_1': 1.81206781900953e-14,\n 'B_3_2': 1.17056127262783e-17,\n 'B_4_0': 2.30644152575784e-14,\n 'B_4_1': 8.12712563368295e-17,\n 'B_5_0': 4.96075368013397e-18,\n 'NAXIS': 2,\n 'NAXIS1': 2048,\n 'NAXIS2': 2048}\n\n h = astropy.io.fits.Header()\n for k in hdict:\n h[k] = hdict[k]\n\n wcs = astropy.wcs.WCS(h, relax=True)\n\n for rot in range(-5,6):\n _ = utils.sip_rot90(wcs, rot, compare=False, verbose=False)\n\n orig = utils.to_header(wcs)\n\n xp = [356, 1024]\n\n # Rotate 90 degrees twice\n new, new_wcs, desc = utils.sip_rot90(orig, 1,\n compare=False, verbose=False)\n new2, new2_wcs, desc2 = utils.sip_rot90(new, 1,\n compare=False, verbose=False)\n\n # Rotate 180\n new2b, new2b_wcs, desc2b = utils.sip_rot90(orig, 2,\n compare=False, verbose=False)\n\n # Test coordinates\n rd = wcs.all_pix2world(np.atleast_2d(xp), 1)\n rd1 = new_wcs.all_pix2world(np.atleast_2d([xp[1], 2048-xp[0]]), 1)\n assert(np.allclose(rd, rd1))\n\n rd2 = new2b_wcs.all_pix2world(2048-np.atleast_2d(xp), 1)\n assert(np.allclose(rd, rd2))\n\n # Back to start\n newx, newx_wcs, descx = utils.sip_rot90(new2b, 2,\n compare=False, verbose=False)\n\n for i in range(new['A_ORDER']+1):\n for j in range(new['B_ORDER']+1):\n Aij = f'A_{i}_{j}'\n Bij = f'B_{i}_{j}'\n if Aij not in new:\n continue\n\n assert(np.allclose(new2[Aij], new2b[Aij]))\n assert(np.allclose(new2[Bij], new2b[Bij]))\n assert(np.allclose(newx[Aij], orig[Aij]))\n assert(np.allclose(newx[Bij], orig[Bij]))\n\n print('sip_rot90 tests passed')\n \n plt.close('all')", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def _PhenomPCalculateModelParameters(self, p):\n\n\n logger.info(\"p['m1'] = {0}\".format(p['m1']))\n logger.info(\"p['m2'] = {0}\".format(p['m2']))\n if p['m1'] < p['m2']:\n raise ValueError('m1 = {0}, m2 = {1}. 
Convention error, this function needs m1 > m2'.format(p['m1'], p['m2']))\n\n #check that the spin magnitude is <=1\n if norm([p['chi1x'], p['chi1y'], p['chi1z']]) > 1.:\n raise ValueError('chi1 has a magnitude > 1')\n if norm([p['chi2x'], p['chi2y'], p['chi2z']]) > 1.:\n raise ValueError('chi2 has a magnitude > 1')\n\n m1_2 = p['m1']**2.\n m2_2 = p['m2']**2.\n\n #we start out in the Lhat = zhat frame\n #and define the spin w.r.t this frame.\n #Then, we incline the orbital frame w.r.t to the z-axis\n #by the angle inc.\n #This is done by a rotation about the y-axis, so the y-components do not change\n #in LAL this step is done in XLALSimInspiralInitialConditionsPrecessingApproxs in LALSimInspiralSpinTaylor.c\n #But it's simple so I just do it in this function.\n\n logger.info(\"spins before rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n p['chi1x'], p['chi1z'] = self.ROTATEY(p['inclination'], p['chi1x'], p['chi1z'])\n p['chi2x'], p['chi2z'] = self.ROTATEY(p['inclination'], p['chi2x'], p['chi2z'])\n\n logger.info(\"spins after rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n\n #from this we construct the orbital angular momentum\n #Again, this is a rotation about the y-axis.\n lnhatx = sin(p['inclination'])\n lnhaty = 0.\n lnhatz = cos(p['inclination'])\n\n chip, chi1_l, chi2_l = chip_fun(p['m1'], p['m2'], p['chi1x'], p['chi1y'], p['chi1z'], p['chi2x'], p['chi2y'], p['chi2z'], lnhatx, lnhaty, lnhatz)\n\n #compute L, J0 and orientation angles\n piM = Constants.LAL_PI * p['M_sec']\n v_ref = (piM * p['fRef'])**(1./3.)\n\n #Use 2PN approximation for initial L\n #magnitude of L\n L0 = p['Mtot']**2. * PhenomPL2PN(v_ref, p['eta'])\n\n #compute initial J\n #NOTE: we the spins need to be dimensionfull\n Jx0 = L0 * lnhatx + p['chi1x']*m1_2 + p['chi2x']*m2_2\n Jy0 = L0 * lnhaty + p['chi1y']*m1_2 + p['chi2y']*m2_2\n Jz0 = L0 * lnhatz + p['chi1z']*m1_2 + p['chi2z']*m2_2\n J0 = norm( [ Jx0, Jy0, Jz0 ] )\n\n #Compute thetaJ, the angle between J0 and line of sight (z-direction)\n if (J0 < 1e-10):\n logger.warning(\"Warning: |J0| < 1e-10. Setting thetaJ = 0.\\n\")\n thetaJ = 0.\n else:\n thetaJ = arccos(Jz0 / J0)\n\n #phiJ, We only use this angle internally since it is degenerate with alpha0.\n #NOTE:\n #in C code\n #if (Jx0 < DBL_MIN && Jy0 < DBL_MIN)\n #I think the replacement is the same\n if (Jx0 <= 0. 
and Jy0 <= 0.):\n phiJ = 0.\n else:\n phiJ = arctan2(Jy0, Jx0) #Angle of J0 in the plane of the sky\n #NOTE: Compared to the similar code in SpinTaylorF2 we have defined phiJ as the angle between the positive\n #(rather than the negative) x-axis and the projection of J0, since this is a more natural definition of the angle.\n #We have also renamed the angle from psiJ to phiJ.\n\n #Rotate Lnhat back to frame where J is along z and the line of\n #sight in the Oxz plane with >0 projection in x, to figure out initial alpha\n #The rotation matrix is\n #{\n #{-cos(thetaJ)*cos(phiJ), -cos(thetaJ)*sin(phiJ), sin(thetaJ)},\n #{sin(phiJ), -cos(phiJ), 0},\n #{cos(phiJ)*sin(thetaJ), sin(thetaJ)*sin(phiJ),cos(thetaJ)}\n #}\n\n rotLx = -lnhatx*cos(thetaJ)*cos(phiJ) - lnhaty*cos(thetaJ)*sin(phiJ) + lnhatz*sin(thetaJ)\n rotLy = lnhatx*sin(phiJ) - lnhaty*cos(phiJ)\n if (rotLx == 0.0 and rotLy == 0.0):\n alpha0 = 0.0\n else:\n alpha0 = arctan2(rotLy, rotLx)\n\n logger.info(\"chi1_l = {0}, chi2_l = {1}, chip = {2}, thetaJ = {3}, alpha0 = {4},\".format(chi1_l, chi2_l, chip, thetaJ, alpha0))\n\n return {\"chi1_l\" : chi1_l, \"chi2_l\" : chi2_l, \"chip\": chip, \"thetaJ\" : thetaJ, \"alpha0\" : alpha0}", "def set_phi(self,phi):\n\t\tr=self.r\n\t\tself.x = np.cos(np.deg2rad(phi))*r\n\t\tself.y = np.sin(np.deg2rad(phi))*r", "def set_all_vert_theta(self):\n for i in range(len(self.sample_locs)):\n self.sample_locs[i]['theta'] = self.theta.position\n self.sample_locs[i]['plate_x'] = self.plate_x.position\n self.sample_locs[i]['plate_y'] = self.plate_y.position\n self.sample_locs[i]['stage_z'] = self.stage_z.position", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def map_sim_positions(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, ax1 = plt.subplots(figsize=(10,10))\n # p.gal_index = np.where(GR.file_name == 'z0.00_G7169_cG29270')[0][0]\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n # print('TEST!',gal_ob.file_name,p.gal_index)\n simdata = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n\n # Plot\n print(simdata.head())\n ax1.plot(simdata.x,simdata.y,'o',ms=2,mew=2)\n\n print(gal_ob.radius)\n # Limit axes limits a bit to avoid area with no particles...\n # ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])make_projec\n # ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')", "def set_transform_param(self, name, pose_tup):\n rospy.set_param(name, {\"position\":pose_tup[0], \"orientation\":pose_tup[1]})", "def parse_projection(self, header): # pragma: no cover\n pass", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def _parametric(self):\n\t\tl, b = self.length, self.breadth\n\t\tself.track_length = 2*l + 2*b\n\t\tself.center_line = np.array([\n\t\t\t[0, l/2, l/2, -l/2, -l/2], \n\t\t\t[-b/2, -b/2, b/2, b/2, -b/2]\n\t\t\t])\n\t\tself.theta_track = np.array([0, l/2, l/2+b, l/2+l+b, l/2+l+2*b])", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = 
[pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
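For reference, here is a minimal sketch of how the set_params_proj document above could be exercised, assuming the EMAN2 Python bindings are installed; the image size and the phi/theta/psi/tx/ty values are made up purely for illustration.

from EMAN2 import EMData

# Illustrative only: a dummy 64x64 image used as a header carrier (values are not meaningful).
img = EMData(64, 64)
p = [20.0, 85.0, 270.0, 1.5, -2.0]   # phi, theta, psi, tx, ty (assumed example values)
set_params_proj(img, p)              # stores an "xform.projection" Transform in the image header

t = img.get_attr("xform.projection")
print(t.get_params("spider"))        # tx/ty come back negated, matching the set_trans(-p[3], -p[4]) call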
recover the numerical values of CTF parameters from an EMAN2 CTF object stored in the header of the input image
def get_ctf(ima):
    from EMAN2 import EMAN2Ctf
    ctf_params = ima.get_attr("ctf")
    return ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang
[ "def extract_feature(fileName,pca_params,n1,n2):\n # Get kernel and bais\n kernel0 = np.array(pca_params['Layer_0/kernel'])\n kernel1 = np.array(pca_params['Layer_1/kernel'])\n bias1 = pca_params['Layer_1/bias'].astype(np.float32)\n # print(bias1)\n # print('kernel0 shape: ',kernel0.shape)\n # print('kernel1 shape: ',kernel1.shape)\n # print('bias1 shape',bias1.shape)\n\n # Read image\n try:\n img = cv2.imread(fileName,0)\n except:\n print('File ' + fileName + ' not found')\n # img = img/255.\n # Extract features\n features = view_as_windows(img,(4,4),step=(4,4)).reshape(8,8,1*4**2)\n # print(features.shape)\n features = np.dot(features,np.transpose(kernel0))\n # print(features.shape)\n features = view_as_windows(features.copy(),(4,4,1),step=(4,4,1))\n # print(features.shape)\n features = features.reshape(2,2,n1*16)\n # print(features.shape)\n features = features + 1/np.sqrt(n1*n2) * bias1\n # print(features.shape)\n features = np.dot(features,np.transpose(kernel1))\n # print(features.shape)\n\n return img,features", "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. 
This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def getParametersfromHDR(self, inHeaderFile):\n if(os.path.isfile(inHeaderFile)):\n datatype = 'INTEGER*2'\n try:\n parFile = open(inHeaderFile, 'rU') \n for eachLine in parFile:\n #print eachLine\n count = eachLine.count('=')\n #print 'count = ' + str(count)\n if(count == 1):\n elements = eachLine.split('=', count)\n elements[0] = elements[0].strip()\n elements[1] = elements[1].strip()\n if elements[0] == 'samples':\n self.headerparameters.append(elements[1])\n elif elements[0] == 'lines':\n self.headerparameters.append(elements[1])\n elif elements[0] == 'data type': \n datatypeENVI = int(elements[1])\n elif elements[0] == 'map info':\n elements[1] = re.sub('\\{','',elements[1])\n elements[1] = re.sub('\\}','',elements[1])\n elements[1] = re.sub('\\s+','',elements[1])\n \n count = elements[1].count(',')\n mapElements = elements[1].split(',', count)\n i = 0\n while i < count:\n self.headerparameters.append(mapElements[i])\n i = i + 1\n \n parFile.close()\n \n # Convert ENVI to Gamma data type\n datatype = self.envi2gammaDataType(datatypeENVI)\n \n self.headerparameters.append(datatype)\n \n except IOError as e:\n print('\\nCould not open file: ', e)\n raise IOError(e)\n else:\n raise BaseException", "def read_cfin_t1():\n files, folder = fetch_cfin_multib()\n img = nib.load(pjoin(folder, 'T1.nii'))\n return img # , gtab", "def failover_cbf(cbf_file):\n\n header = {}\n\n header[\"two_theta\"] = 0.0\n\n for record in open(cbf_file):\n\n if \"_array_data.data\" in record:\n break\n\n if \"PILATUS 2M\" in record:\n header[\"detector_class\"] = 
\"pilatus 2M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (1679, 1475)\n continue\n\n if \"PILATUS3 2M\" in record:\n header[\"detector_class\"] = \"pilatus 2M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (1679, 1475)\n continue\n\n if \"PILATUS 6M\" in record:\n header[\"detector_class\"] = \"pilatus 6M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (2527, 2463)\n continue\n\n if \"PILATUS3 6M\" in record:\n header[\"detector_class\"] = \"pilatus 6M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (2527, 2463)\n continue\n\n if \"Start_angle\" in record:\n header[\"phi_start\"] = float(record.split()[-2])\n continue\n\n if \"Angle_increment\" in record:\n header[\"phi_width\"] = float(record.split()[-2])\n continue\n\n if \"Exposure_period\" in record:\n header[\"exposure_time\"] = float(record.split()[-2])\n continue\n\n if \"Silicon sensor\" in record:\n header[\"sensor\"] = 1000 * float(record.split()[4])\n continue\n\n if \"Count_cutoff\" in record:\n header[\"saturation\"] = int(record.split()[2])\n continue\n\n if \"Detector_distance\" in record:\n header[\"distance\"] = 1000 * float(record.split()[2])\n continue\n\n if \"Wavelength\" in record:\n header[\"wavelength\"] = float(record.split()[-2])\n continue\n\n if \"Pixel_size\" in record:\n header[\"pixel\"] = (\n 1000 * float(record.split()[2]),\n 1000 * float(record.split()[5]),\n )\n continue\n\n if \"Beam_xy\" in record:\n\n # N.B. this is swapped again for historical reasons\n\n beam_pixels = map(\n float,\n record.replace(\"(\", \"\").replace(\")\", \"\").replace(\",\", \"\").split()[2:4],\n )\n header[\"beam\"] = (\n beam_pixels[1] * header[\"pixel\"][1],\n beam_pixels[0] * header[\"pixel\"][0],\n )\n header[\"raw_beam\"] = (\n beam_pixels[1] * header[\"pixel\"][1],\n beam_pixels[0] * header[\"pixel\"][0],\n )\n continue\n\n # try to get the date etc. 
literally.\n\n try:\n datestring = record.split()[-1].split(\".\")[0]\n format = \"%Y-%b-%dT%H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n try:\n\n if not \"date\" in header:\n datestring = record.split()[-1].split(\".\")[0]\n format = \"%Y-%m-%dT%H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n try:\n\n if not \"date\" in header:\n datestring = record.replace(\"#\", \"\").strip().split(\".\")[0]\n format = \"%Y/%b/%d %H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n header[\"phi_end\"] = header[\"phi_start\"] + header[\"phi_width\"]\n\n return header", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # 
Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), 
range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def __readheader(self):\n vals=self.rffile.read(self.emiss_hdr_fmt)\n self.name,self.note,ione,self.nspec,self.start_date,self.start_time,self.end_date,self.end_time=vals[0:10],vals[10:70],vals[70],vals[71],vals[72],vals[73],vals[74],vals[75]\n \n self.name=Int2Asc(self.name)\n self.note=Int2Asc(self.note)\n self.rdum,rdum,self.iutm,self.xorg,self.yorg,self.delx,self.dely,self.nx,self.ny,self.nz,idum,self.idum,rdum,rdum,rdum=self.rffile.read(self.grid_hdr_fmt)\n\n if self.name=='EMISSIONS ':\n #Special case of gridded emissions\n #Seems to be same as avrg\n self.nlayers=1\n else:\n self.nlayers=self.nz\n self.ione,ione,nx,ny=self.rffile.read(self.cell_hdr_fmt)\n if not (self.nx,self.ny)==(nx,ny):\n raise ValueError(\"nx, ny defined first as %i, %i and then as %i, %i\" % (self.nx,self.ny,nx,ny))\n species_temp=self.rffile.read(self.nspec*self.spc_fmt)\n self.spcnames=[]\n for i in range(0,self.nspec*10,10):\n self.spcnames.append(Int2Asc(species_temp[i:i+10]))\n \n self.data_start_byte=self.rffile.record_start\n start_date,start_time,end_date,end_time=self.rffile.read(self.time_hdr_fmt)\n self.time_step=timediff((start_date,start_time),(end_date,end_time))\n self.time_step_count=int(timediff((self.start_date,self.start_time),(self.end_date,self.end_time),(2400,24)[int(self.time_step % 2)])//self.time_step)\n if self.name == 'AIRQUALITY':\n self.time_step_count = 1\n self.start_date = self.end_date\n self.record_size=self.rffile.record_size\n self.padded_size=self.record_size+8\n self.cell_count=(self.record_size-struct.calcsize(\"i10i\"))/struct.calcsize(self.data_fmt)\n self.record_fmt=(\"i10i\")+self.data_fmt*(self.cell_count)", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def test_extract_parameters():\n with open(\"testDataPressure.dat\", 'r') as file:\n for line in file:\n pressure = float(line.split(\"mbar\")[0])\n R, M, ConvFactor = GlobalData.extract_parameters(pressure, 0.15)\n\n #assert R.n == pytest.approx(3.27536e-8, rel=float_relative_tolerance)\n #assert M.n == pytest.approx(3.23808e-19, rel=float_relative_tolerance)\n #assert ConvFactor.n == pytest.approx(190629, rel=float_relative_tolerance)\n \n #assert R.std_dev == pytest.approx(4.97914e-9, rel=float_relative_tolerance) \n #assert M.std_dev == pytest.approx(9.84496e-20, rel=float_relative_tolerance) \n #assert ConvFactor.std_dev == pytest.approx(58179.9, rel=float_relative_tolerance)\n\n return None", "def readIDCCoeffs(self, header):\n coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale',\n 'idcv2ref', 'idcv3ref', 'idctheta']\n for c in coeffs:\n self.__setattr__(c, header.get(c, None))", "def parse_cif(cif_name='iso.cif'):\n with open(cif_name) as f_iso:\n content = f_iso.readlines()\n u = np.zeros(6)\n for e in [line.strip().split() for line in content if len(line.strip().split()) == 2]:\n if 
'cell_length_a' in e[0]:\n u[0] = float(e[1])\n elif 'cell_length_b' in e[0]:\n u[1] = float(e[1])\n elif 'cell_length_c' in e[0]:\n u[2] = float(e[1])\n elif 'cell_angle_alpha' in e[0]:\n u[3] = float(e[1])\n elif 'cell_angle_beta' in e[0]:\n u[4] = float(e[1])\n elif 'cell_angle_gamma' in e[0]:\n u[5] = float(e[1])\n a, b, c, alpha, beta, gamma = u\n cosdelta_up = np.cos(np.radians(alpha)) - np.cos(np.radians(beta))*np.cos(np.radians(gamma))\n cosdelta_low = np.sin(np.radians(beta))*np.sin(np.radians(gamma))\n cosdelta = cosdelta_up / cosdelta_low\n sindelta = np.sqrt(1-cosdelta**2)\n la = a*np.array([1.0, 0.0, 0.0])\n lb = b*np.array([np.cos(np.radians(gamma)), np.sin(np.radians(gamma)), 0.0])\n lc = c*np.array([np.cos(np.radians(beta)), np.sin(np.radians(beta))*cosdelta,\n np.sin(np.radians(beta))*sindelta])\n u_lc = lc/np.linalg.norm(lc)\n theta_c_rad = np.arccos(np.clip(np.dot(u_lc, [0, 0, 1]), -1.0, 1.0))\n return la, lb, lc, theta_c_rad", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def get_ocv_value(self):\n if 'EOC' in self.header.keys():\n return self.header['EOC']\n else:\n return None", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = 
-2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return 
filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
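As a usage note, the following is a minimal sketch of get_ctf above, assuming the EMAN2 Python bindings are available; the CTF values (defocus in microns, Cs in mm, voltage in kV, pixel size in Angstrom) are illustrative, and dfdiff/dfang are assumed to default to zero when not supplied to from_dict.

from EMAN2 import EMData, EMAN2Ctf

# Illustrative only: attach a CTF object to a dummy image header, then read it back.
ctf = EMAN2Ctf()
ctf.from_dict({"defocus": 2.1, "cs": 2.0, "voltage": 300.0,
               "apix": 1.06, "bfactor": 0.0, "ampcont": 10.0})
img = EMData(64, 64)
img.set_attr("ctf", ctf)

defocus, cs, voltage, apix, bfactor, ampcont, dfdiff, dfang = get_ctf(img)
print(defocus, voltage, apix)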
generate an EMAN2 CTF object using the values of CTF parameters given in the list p
def generate_ctf(p):
    from EMAN2 import EMAN2Ctf
    defocus = p[0]
    cs = p[1]
    voltage = p[2]
    pixel_size = p[3]
    bfactor = p[4]
    amp_contrast = p[5]
    if defocus > 100:  # which means it is very likely in Angstrom, therefore we are using the old convention
        defocus *= 1e-4
    if amp_contrast < 1.0:
        from math import sqrt
        amp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)
    ctf = EMAN2Ctf()
    if(len(p) == 6):
        ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast})
    else:
        ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast, 'dfdiff':p[6], 'dfang':p[7]})
    return ctf
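A minimal sketch of calling generate_ctf above, assuming an EMAN2 environment; the six values are illustrative (defocus in microns, Cs in mm, voltage in kV, pixel size in Angstrom, B-factor, amplitude contrast in percent). Passing an eight-element list would additionally set the astigmatism fields dfdiff and dfang, as the else branch shows.

# Illustrative only: defocus 2.0 um, Cs 2.0 mm, 300 kV, 1.06 A/pixel, B-factor 0, 10% amplitude contrast.
p = [2.0, 2.0, 300.0, 1.06, 0.0, 10.0]
ctf = generate_ctf(p)
print(ctf.defocus, ctf.cs, ctf.ampcont)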
[ "def from_thermo(T_C, p):\n\ty = y_from_p(p)\n\tx = x_from_Tp(T_C+C_to_K, p)\n\treturn x, y", "def target_distribution_gen(name, parameter1, parameter2):\n if name==\"CHSH\":\n v = parameter2\n p = np.array([\n (-2 + np.sqrt(2) + (-1 + np.sqrt(2))*v)/(16.*(-2 + np.sqrt(2))),(2 + np.sqrt(2) + v + np.sqrt(2)*v)/(32 + 16*np.sqrt(2)),(-2 + np.sqrt(2) + v - np.sqrt(2)*v)/(16.*(-2 + np.sqrt(2))),\n (2 + np.sqrt(2) - (1 + np.sqrt(2))*v)/(16.*(2 + np.sqrt(2))),(2 + np.sqrt(2) + v + np.sqrt(2)*v)/(32 + 16*np.sqrt(2)),(-2 + np.sqrt(2) + (-1 + np.sqrt(2))*v)/(16.*(-2 + np.sqrt(2))),(2 + np.sqrt(2) - (1 + np.sqrt(2))*v)/(16.*(2 + np.sqrt(2))),\n (-2 + np.sqrt(2) + v - np.sqrt(2)*v)/(16.*(-2 + np.sqrt(2))),(-2 + np.sqrt(2) + (-1 + np.sqrt(2))*v)/(16.*(-2 + np.sqrt(2))),(2 + np.sqrt(2) + v + np.sqrt(2)*v)/(32 + 16*np.sqrt(2)),(-2 + np.sqrt(2) + v - np.sqrt(2)*v)/(16.*(-2 + np.sqrt(2))),\n (2 + np.sqrt(2) - (1 + np.sqrt(2))*v)/(16.*(2 + np.sqrt(2))),(2 + np.sqrt(2) - (1 + np.sqrt(2))*v)/(16.*(2 + np.sqrt(2))),(-2 + np.sqrt(2) + v - np.sqrt(2)*v)/(16.*(-2 + np.sqrt(2))),(2 + np.sqrt(2) + v + np.sqrt(2)*v)/(32 + 16*np.sqrt(2)),\n (-2 + np.sqrt(2) + (-1 + np.sqrt(2))*v)/(16.*(-2 + np.sqrt(2)))\n ])\n\n assert (np.abs(np.sum(p)-1.0) < (1E-6)),\"Improperly normalized p!\"\n return p", "def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def set_ctf(ima, p):\n\tfrom utilities import generate_ctf\n\tctf = generate_ctf( p )\n\tima.set_attr( \"ctf\", ctf )", "def r2013_fe3(Cm, T, P):\n \n a = 0.22\n b = 3800\n c = -370\n d = -6.6 # feo\n e = 7.3 # al2o3\n f = 17.3 # cao\n g = 132.3 # na2o\n h = -147.8 # k2o\n i = 0.6 # p2o5\n j = -4.26\n\n Pgpa = P*1e-9 #GPa\n\n FO2 = gp.exp((gp.log(Cm['fe2o3']/Cm['feo']) - b/T - c*(Pgpa / T) - d*(Cm['feo'] + Cm['fe2o3']*0.8998) - e * Cm['al2o3'] - f * Cm['cao'] - g * Cm['na2o'] - h * Cm['k2o'] - i * Cm['p2o5'] - j)/a)\n \n return FO2", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def gen_reaction(tabs):\n global pbeam\n pbeam = TLorentzVector(0, 0, Ebeam, Ebeam)\n global ptarg\n ptarg = TLorentzVector(0, 0, 0, m_proton)\n pinitial = pbeam + ptarg\n global s\n s = pinitial.Mag2()\n q_in = (s - m_proton**2) / (2 * math.sqrt(s))\n q_cm = math.sqrt((s - m_proton**2 + m_omega**2)**2 / (4 * s) - m_omega**2)\n EomegaCM = math.sqrt(m_omega**2 + q_cm**2)\n EprotonCM = math.sqrt(m_proton**2 + q_cm**2)\n costhetaCM = (2 * q_in * EomegaCM - m_omega**2 - tabs) / (2 * q_in * q_cm)\n if abs(costhetaCM) > 1:\n print \"tabs =\", tabs, \"is out of range, please try another value\"\n return 0\n costheta0 = random.Uniform(-1, 1)\n phi0 = random.Uniform(-math.pi, math.pi)\n costheta1 = random.Uniform(-1, 1)\n phi1 = random.Uniform(-math.pi, math.pi)\n pomega = gen_omega(costheta0, phi0, costheta1, phi1)\n sinthetaCM = math.sqrt(1 - costhetaCM**2)\n beta = TVector3(q_cm * sinthetaCM, 0, q_cm * costhetaCM) * 
(1 / EomegaCM)\n pomega.Boost(beta)\n pgamma[0].Boost(beta)\n pgamma[1].Boost(beta)\n pgamma[2].Boost(beta)\n global precoil\n precoil = TLorentzVector(-q_cm * sinthetaCM, 0, -q_cm * costhetaCM, EprotonCM)\n betaCM = pinitial.Vect() * (1 / pinitial[3])\n pgamma[0].Boost(betaCM)\n pgamma[1].Boost(betaCM)\n pgamma[2].Boost(betaCM)\n pomega.Boost(betaCM)\n precoil.Boost(betaCM)\n return pomega", "def coefficients(self,P,**kwargs):\n\n coefs = {}\n for mode in self.sur_dict:\n params = self.parameter_convert[mode](P,**kwargs) # very redundant, almost certainly the same for all modes in the surrogate.\n params_surrogate = self.sur_dict[mode].get_surr_params(params)\n if rosDebug:\n print(\" passing params to mode : \", mode, params)\n print(\" surrogate natural parameter is \", params_surrogate)\n\n # New version: gw-surrogate-0.5\n h_EIM = self.sur_dict[mode].eim_coeffs(params_surrogate, 'waveform_basis')\n # OLD VERSION: gw-surrogate-0.4.2 and earlier\n # x0= self.sur_dict[mode]._affine_mapper(params_surrogate)\n # amp_eval = self.sur_dict[mode]._amp_eval(x0)\n # phase_eval = self.sur_dict[mode]._phase_eval(x0)\n # norm_eval = self.sur_dict[mode]._norm_eval(x0)\n # h_EIM = np.zeros(len(amp_eval))\n # if self.sur_dict[mode].surrogate_mode_type == 'waveform_basis':\n # h_EIM = norm_eval*amp_eval*np.exp(1j*phase_eval)\n\n\n for indx in np.arange(self.nbasis_per_mode[mode]): \n how_to_store = (mode[0], mode[1], indx)\n coefs[how_to_store] = self.post_dict_complex_coef[mode](h_EIM[indx]) # conjugation as needed\n\n return coefs", "def _PhenomPCalculateModelParameters(self, p):\n\n\n logger.info(\"p['m1'] = {0}\".format(p['m1']))\n logger.info(\"p['m2'] = {0}\".format(p['m2']))\n if p['m1'] < p['m2']:\n raise ValueError('m1 = {0}, m2 = {1}. Convention error, this function needs m1 > m2'.format(p['m1'], p['m2']))\n\n #check that the spin magnitude is <=1\n if norm([p['chi1x'], p['chi1y'], p['chi1z']]) > 1.:\n raise ValueError('chi1 has a magnitude > 1')\n if norm([p['chi2x'], p['chi2y'], p['chi2z']]) > 1.:\n raise ValueError('chi2 has a magnitude > 1')\n\n m1_2 = p['m1']**2.\n m2_2 = p['m2']**2.\n\n #we start out in the Lhat = zhat frame\n #and define the spin w.r.t this frame.\n #Then, we incline the orbital frame w.r.t to the z-axis\n #by the angle inc.\n #This is done by a rotation about the y-axis, so the y-components do not change\n #in LAL this step is done in XLALSimInspiralInitialConditionsPrecessingApproxs in LALSimInspiralSpinTaylor.c\n #But it's simple so I just do it in this function.\n\n logger.info(\"spins before rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n p['chi1x'], p['chi1z'] = self.ROTATEY(p['inclination'], p['chi1x'], p['chi1z'])\n p['chi2x'], p['chi2z'] = self.ROTATEY(p['inclination'], p['chi2x'], p['chi2z'])\n\n logger.info(\"spins after rotation by {0} = \".format(p['inclination']))\n logger.info(\"chi1x = {0}, chi1y = {1}, chi1z = {2}\".format(p['chi1x'], p['chi1y'], p['chi1z']))\n logger.info(\"chi2x = {0}, chi2y = {1}, chi2z = {2}\".format(p['chi2x'], p['chi2y'], p['chi2z']))\n\n\n\n #from this we construct the orbital angular momentum\n #Again, this is a rotation about the y-axis.\n lnhatx = sin(p['inclination'])\n lnhaty = 0.\n lnhatz = cos(p['inclination'])\n\n chip, chi1_l, chi2_l = chip_fun(p['m1'], p['m2'], p['chi1x'], p['chi1y'], p['chi1z'], p['chi2x'], 
p['chi2y'], p['chi2z'], lnhatx, lnhaty, lnhatz)\n\n #compute L, J0 and orientation angles\n piM = Constants.LAL_PI * p['M_sec']\n v_ref = (piM * p['fRef'])**(1./3.)\n\n #Use 2PN approximation for initial L\n #magnitude of L\n L0 = p['Mtot']**2. * PhenomPL2PN(v_ref, p['eta'])\n\n #compute initial J\n #NOTE: we the spins need to be dimensionfull\n Jx0 = L0 * lnhatx + p['chi1x']*m1_2 + p['chi2x']*m2_2\n Jy0 = L0 * lnhaty + p['chi1y']*m1_2 + p['chi2y']*m2_2\n Jz0 = L0 * lnhatz + p['chi1z']*m1_2 + p['chi2z']*m2_2\n J0 = norm( [ Jx0, Jy0, Jz0 ] )\n\n #Compute thetaJ, the angle between J0 and line of sight (z-direction)\n if (J0 < 1e-10):\n logger.warning(\"Warning: |J0| < 1e-10. Setting thetaJ = 0.\\n\")\n thetaJ = 0.\n else:\n thetaJ = arccos(Jz0 / J0)\n\n #phiJ, We only use this angle internally since it is degenerate with alpha0.\n #NOTE:\n #in C code\n #if (Jx0 < DBL_MIN && Jy0 < DBL_MIN)\n #I think the replacement is the same\n if (Jx0 <= 0. and Jy0 <= 0.):\n phiJ = 0.\n else:\n phiJ = arctan2(Jy0, Jx0) #Angle of J0 in the plane of the sky\n #NOTE: Compared to the similar code in SpinTaylorF2 we have defined phiJ as the angle between the positive\n #(rather than the negative) x-axis and the projection of J0, since this is a more natural definition of the angle.\n #We have also renamed the angle from psiJ to phiJ.\n\n #Rotate Lnhat back to frame where J is along z and the line of\n #sight in the Oxz plane with >0 projection in x, to figure out initial alpha\n #The rotation matrix is\n #{\n #{-cos(thetaJ)*cos(phiJ), -cos(thetaJ)*sin(phiJ), sin(thetaJ)},\n #{sin(phiJ), -cos(phiJ), 0},\n #{cos(phiJ)*sin(thetaJ), sin(thetaJ)*sin(phiJ),cos(thetaJ)}\n #}\n\n rotLx = -lnhatx*cos(thetaJ)*cos(phiJ) - lnhaty*cos(thetaJ)*sin(phiJ) + lnhatz*sin(thetaJ)\n rotLy = lnhatx*sin(phiJ) - lnhaty*cos(phiJ)\n if (rotLx == 0.0 and rotLy == 0.0):\n alpha0 = 0.0\n else:\n alpha0 = arctan2(rotLy, rotLx)\n\n logger.info(\"chi1_l = {0}, chi2_l = {1}, chip = {2}, thetaJ = {3}, alpha0 = {4},\".format(chi1_l, chi2_l, chip, thetaJ, alpha0))\n\n return {\"chi1_l\" : chi1_l, \"chi2_l\" : chi2_l, \"chip\": chip, \"thetaJ\" : thetaJ, \"alpha0\" : alpha0}", "def __init__(self, coefficient, basefield=None):\n\n # parameter parse\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except AttributeError:\n # backward compatibility\n if isinstance(basefield, int):\n field = finitefield.FinitePrimeField.getInstance(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be FiniteField object.\")\n\n coeffs_list = []\n if isinstance(coefficient, list):\n for c in coefficient:\n if isinstance(c, int):\n coeff = field.createElement(c)\n elif c in field:\n coeff = c\n else:\n raise ValueError(\"coefficient not in basefield.\")\n coeffs_list.append(coeff)\n\n # general initialize\n ECGeneric.__init__(self, coeffs_list, field)\n\n zero = self.basefield.zero\n one = self.basefield.one\n\n # format attribute\n if self.ch == 2:\n if len(self) == 5:\n # FIXME\n if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]:\n self.a1 = one\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = one\n self.b4 = zero\n self.b6 = zero\n self.b8 = self.a6\n self.c4 = one\n self.c6 = one\n self.disc = self.a6\n self.j = self.disc.inverse()\n elif coeffs_list[0] % 2 == coeffs_list[1] % 2 == zero and coeffs_list[2]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = 
coeffs_list[4]\n self.b2 = zero\n self.b4 = zero\n self.b6 = self.a3**2\n self.b8 = self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = self.a3**4\n self.j = zero\n else:\n raise ValueError(\"coefficient may be not representation of EC.\")\n else:\n raise ValueError(\"coefficient may only use full Weierstrass form for characteristic 2.\")\n elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6\n # FIXME\n if len(self) == 5:\n if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]:\n self.a1 = zero\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = self.a2\n self.b4 = zero\n self.b6 = self.a6\n self.b8 = self.a2*self.a6\n self.c4 = self.b2**2\n self.c6 = 2*self.b2**3\n self.disc = -self.a2**3*self.a6\n self.j = (-self.a2**3)*self.a6.inverse()\n elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = 2*self.a4\n self.b6 = self.a6\n self.b8 = 2*self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = -self.a4**3\n self.j = zero\n else:\n raise ValueError(\"can't defined EC.\")\n if not self.disc:\n raise ValueError(\"this curve is singular.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n else:\n if len(self) == 5:\n self.a1 = coeffs_list[0]\n self.a2 = coeffs_list[1]\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n elif len(self) == 2:\n self.a = coeffs_list[0]\n self.b = coeffs_list[1]\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = self.a\n self.a6 = self.b\n self.b2 = zero\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -(self.a**2)\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n\n self.ord = None\n self.abelian = None\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one},\n self.basefield)", "def __call__(self, annotion_list, back_word_list, p):\n batch_size = p.data.shape[0]\n exponential_list = []\n sum_exponential = XP.fzeros((batch_size, 1))\n # Calculate the total value list and total value\n # Prepare the Convoluation\n for annotion, back_word in zip(annotion_list, back_word_list):\n weight = functions.tanh(self.annotion_weight(annotion) + self.back_weight(back_word) + self.pw(p))\n exponential = functions.exp(self.weight_exponential(weight))\n exponential_list.append(exponential)\n sum_exponential += exponential\n ZEROS = XP.fzeros((batch_size, self.hidden_size))\n annotion_value = ZEROS\n back_word_value = ZEROS\n # Calculate the Convolution Value each annotion and back word\n for annotion, back_word, exponential 
in zip(annotion_list, back_word_list, exponential_list):\n exponential /= sum_exponential\n annotion_value += functions.reshape(functions.batch_matmul(annotion, exponential), (batch_size, self.hidden_size))\n back_word_value += functions.reshape(functions.batch_matmul(back_word, exponential), (batch_size, self.hidden_size))\n return annotion_value, back_word_value", "def crear_etiquetas_input(self, inputlist):\n\n for n, i in enumerate(inputlist):\n for eti in i['etiquetas']:\n self.fuzz_inputs[n][eti['nombre']] = getattr(generatemf, eti['mf'])(\n self.fuzz_inputs[n].universe, *eti['definicion'])", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. 
#\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for 
Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = 
re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def C_factory(P, n=2, V_type=\"clamped\"):\n\n # TODO: check that p_len is ok with the degree and > 0\n m = len(P) # the number of points in P\n D = len(P[0]) # the dimension of a point (2D, 3D)\n\n # Create the knot vector\n V = make_knot_vector(n, m, V_type)\n # TODO: check the validity of the input knot vector.\n # TODO: create an initial Vector Point.\n\n #############################################################################\n # The following line will be detailed later. #\n # We create the highest degree basis spline function, aka. our entry point. #\n # Using the recursive formulation of b-splines, this b_n will call #\n # lower degree basis_functions. b_n is a function. #\n #############################################################################\n b_n = basis_factory(n)\n\n @memoize\n def S(t, d):\n \"\"\" The b-spline funtion, as defined in eq. 3. \"\"\"\n out = 0.\n for i in range(m): #: Iterate over 0-indexed point indices\n out += P[i][d]*b_n(t, i, V)\n return out\n\n def C(t):\n \"\"\" The b-spline curve, as defined in eq. 4. \"\"\"\n out = [0.]*D #: For each t we return a list of D coordinates\n for d in range(D): #: Iterate over 0-indexed dimension indices\n out[d] = S(t,d)\n return out\n\n C.P = P #: The control polygone\n C.V = V #: The knot vector used by the function\n C.spline = S #: The spline function.\n C.basis = b_n #: The highest degree basis function. 
Useful to do some plotting.\n C.min = V[0] #: The domain of definition of the function, lower bound for t\n C.max = V[-1] #: The domain of definition of the function, upper bound for t\n C.endpoint = C.max!=V[-1] #: Is the upper bound included in the domain.\n return C", "def metamer(p):\r\n return Components(p, Scale=3)", "def generate_CVRP(N, C, muC, sdC, regular=False, R=200.0):\n points = []\n demands = []\n points.append((0.0,0.0)) # Depot at 0,0\n demands.append(0)\n sumc = 0.0\n alpha = pi/4.0\n for _ in range(N):\n if regular:\n alpha+=(2*pi/N)\n r = R\n else:\n # Random angle\n alpha = random.random()*2*pi\n r = R*random.gauss(1.0, 0.33)\n pt_x = r*cos(alpha)\n pt_y = r*sin(alpha)\n c = min(C, max(1.0, random.gauss(muC, sdC)))\n sumc+=c\n points.append((pt_x, pt_y))\n demands.append(c)\n #points[0][2] = -sumc\n \n D = calculate_D(points)\n \n return ProblemDefinition(N,points,None,demands,D,C,None)", "def extract_ctf_params(self,ctfs:np.array,params:dict,signal:str,\n\t\t\t\t\t\t perm_idx:int,ch_idx:int=None)->dict:\n\n\t\tif ctfs.ndim == 3:\n\t\t\tnr_freqs, nr_samples_tr, nr_chan = ctfs.shape\n\t\t\tnr_samples_te = 1\n\t\t\tGAT = False\n\t\t\t# insert new dimension so that indexing does not crash\n\t\t\tctfs = ctfs[...,np.newaxis,:]\n\t\telse:\n\t\t\tnr_freqs, nr_samples_tr, nr_samples_te, nr_chan = ctfs.shape \n\t\t\tGAT = True\n\n\t\t# hack to deal with line breaks\n\t\tp = params\n\t\ts = signal \n\n\t\tfor f in range(nr_freqs):\n\t\t\tfor tr_s in range(nr_samples_tr):\n\t\t\t\tfor te_s in range(nr_samples_te):\n\t\n\t\t\t\t\tslopes = self.extract_slopes(ctfs[f,tr_s,te_s])\n\t\t\t\t\tif ch_idx is None:\n\t\t\t\t\t\tp[f'{s}_slopes'][perm_idx,f,tr_s,te_s] = slopes\n\t\t\t\t\telse:\n\t\t\t\t\t\tp[f'{s}_slopes'][perm_idx,f,tr_s,te_s,ch_idx] = slopes\n\t\t\t\t\tif any([key for key in params.keys() if 'amps' in key]):\n\t\t\t\t\t\t(amps, base,\n\t\t\t\t\t\tconc,mu,_) = self.fit_cos_to_ctf(ctfs[f,tr_s,te_s])\n\t\t\t\t\t\tif ch_idx is None:\n\t\t\t\t\t\t\tp[f'{s}_amps'][perm_idx,f,tr_s,te_s] = amps\n\t\t\t\t\t\t\tp[f'{s}_base'][perm_idx,f,tr_s,te_s] = base\n\t\t\t\t\t\t\tp[f'{s}_conc'][perm_idx,f,tr_s,te_s] = conc\n\t\t\t\t\t\t\tp[f'{s}_means'][perm_idx,f,tr_s,te_s] = mu\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tp[f'{s}_amps'][perm_idx,f,tr_s,te_s,ch_idx] = amps\n\t\t\t\t\t\t\tp[f'{s}_base'][perm_idx,f,tr_s,te_s,ch_idx] = base\n\t\t\t\t\t\t\tp[f'{s}_conc'][perm_idx,f,tr_s,te_s,ch_idx] = conc\n\t\t\t\t\t\t\tp[f'{s}_means'][perm_idx,f,tr_s,te_s,ch_idx] = mu\t\n\n\t\tparams = p\t\n\n\t\treturn params", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = 
self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the EMAN2 CTF object in the header of the input image using the values of CTF parameters given in the list p
def set_ctf(ima, p):
    from utilities import generate_ctf
    ctf = generate_ctf( p )
    ima.set_attr( "ctf", ctf )
[ "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def update_header(vcf):\n # if Adding Fields to INFO field\n vcf.add_info_to_header(\n {'ID': 'OGT', 'Description': ''.join([\n 'Original Octopus GT fields for each sample before reassigning the GT value based on AR threshold (GT_0/1 < AR_',\n str(AR_threshold_for_GT), ' and GT_1/1 >= AR_', str(AR_threshold_for_GT), ' )']), 'Type': 'String', 'Number': '.'})\n \n # adding field to the FORMAT columns\n # NOTE: if the field already exist in the Header, it will not be replaced or update; You must rename the Field already present in the VCF to add specifically the\n # following fields to the vcf HEADER\n # Adding AR\n vcf.add_format_to_header(\n {'ID': 'AR', 'Description': 'Alt Allelic Ratios for each sample in same order as list of samples found in VCF beyond column FORMAT', 'Type': 'Float', 'Number': '1'})\n return vcf", "def update_vcf_header(header):\n for key in header.formats.keys():\n if key not in 'GT GQ'.split():\n header.formats.remove_header(key)\n\n header.add_line(\n '##FORMAT=<ID=RD_CN,Number=1,Type=Integer,Description=\"Predicted copy state\">')\n header.add_line(\n '##FORMAT=<ID=RD_GQ,Number=1,Type=Integer,Description=\"Read-depth genotype quality\">')\n header.add_line(\n '##FORMAT=<ID=PE_GT,Number=1,Type=Integer,Description=\"Paired-end genotype\">')\n header.add_line(\n '##FORMAT=<ID=PE_GQ,Number=1,Type=Integer,Description=\"Paired-end genotype quality\">')\n header.add_line(\n '##FORMAT=<ID=SR_GT,Number=1,Type=Integer,Description=\"Split-read genotype\">')\n header.add_line(\n '##FORMAT=<ID=SR_GQ,Number=1,Type=Integer,Description=\"Split read genotype quality\">')\n header.add_line(\n '##FORMAT=<ID=EV,Number=1,Type=Integer,Description=\"Classes of evidence supporting final genotype\">')\n\n header.add_line(\n '##INFO=<ID=MULTIALLELIC,Number=0,Type=Flag,Description=\"Multiallelic site\">')\n header.add_line(\n '##INFO=<ID=varGQ,Number=1,Type=Integer,Description=\"Variant genotype quality\">')", "def fcc(filename = '/mnt/hgfs/10_19_simple_shear/VPSC/sx/hijhiihb.sx',\r\n hii = 1.0, hij = 1.4, hb = -0.4,\r\n tau0 = 1.0, tau1 = 0.2, thet0 = 1.0, thet1 = 0.05,\r\n #tau0 = 1.045e2, tau1= 70., thet0 = 2.6e2, thet1 = 0.95e2,\r\n hpfac = 0., gndfac = 0., header = '** material info',\r\n c11 = 205., c12 = 138., c44 = 126.,\r\n #nmodesx = 2, modes =[1],\r\n iopsysx = 0,\r\n dependency = 'indv',\r\n #dependency = 'iso',\r\n nrsx = 20,\r\n twshx = 0., isectw = 0, thres1 = 0., thres2 = 0.,\r\n #interaction ='Bauschinger'\r\n interaction ='Bauschinger Latent'\r\n ):\r\n\r\n \r\n f = open(filename, 'w')\r\n f.writelines(header + '\\n')\r\n f.writelines('cubic \\n')\r\n f.writelines('%6.1f%6.1f%6.1f%6.1f%6.1f%6.1f'%(1.0, 1.0, 1.0, 90., 90., 90.,))\r\n f.writelines(' unit cell axes and 
angles \\n')\r\n\r\n # SINGLE CRYSTAL ELASTIC STIFFNESS MATRIX :\r\n f.writelines('** Elastic stiffness (single crystal [GPa]; scaled = 0.85 x interpolated)\\n')\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(c11, c12, c12, 0., 0., 0.))\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(c12, c11, c12, 0., 0., 0.))\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(c12, c12, c11, 0., 0., 0.))\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(0., 0., 0., c44, 0., 0.,))\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(0., 0., 0., 0., c44, 0. ))\r\n f.writelines(' %8.1f%8.1f%8.1f%8.1f%8.1f%8.1f \\n' %(0., 0., 0., 0., 0., c44))\r\n\r\n # THERMAL EXPANSION COEFFICIENTS (single crystal in crystal axis)\r\n f.writelines('** Thermal expansion coefficients (single crystal axis): \\n')\r\n f.writelines(' %9.1e%9.1e%9.1e%9.1e%9.1e%9.1e \\n' %( 10.0e-6,10.0e-6,10.0e-6, 0., 0., 0.))\r\n\r\n\r\n ## BASIC SLIP SYSTEM DECLARATION OF FCC STRUCTURE {111}<110>\r\n sn = []\r\n sb = []\r\n\r\n sn.append([])\r\n sb.append([])\r\n sb[0].append([])\r\n sb[0].append([])\r\n sb[0].append([])\r\n sn[0] = [1,1,1] \r\n sb[0][0] = [0,1,-1]\r\n sb[0][1] = [1, 0, -1]\r\n sb[0][2] = [1,-1, 0]\r\n\r\n sn.append([])\r\n sb.append([])\r\n sb[1].append([])\r\n sb[1].append([])\r\n sb[1].append([])\r\n sn[1] = [-1, 1, 1]\r\n sb[1][0] = [0, 1, -1]\r\n sb[1][1] = [1, 0, 1]\r\n sb[1][2] = [1, 1, 0]\r\n\r\n sn.append([])\r\n sb.append([])\r\n sb[2].append([])\r\n sb[2].append([])\r\n sb[2].append([])\r\n sn[2] = [-1, -1, 1]\r\n sb[2][0] = [0, 1, 1]\r\n sb[2][1] = [1, 0, 1]\r\n sb[2][2] = [1, -1, 0]\r\n\r\n sn.append([])\r\n sb.append([])\r\n sb[3].append([])\r\n sb[3].append([])\r\n sb[3].append([])\r\n sn[3] = [1, -1, 1]\r\n sb[3][0] = [0, 1, 1]\r\n sb[3][1] = [1, 0, -1]\r\n sb[3][2] = [1, 1, 0]\r\n\r\n #dependency !\r\n # dependency may be 'iso', 'indv'\r\n\r\n ##\r\n\r\n # INFORMATION ABOUT SLIP $ TWINNING MODES IN THIS FILE:\r\n\r\n #nmodesx = 2, modes =[1],\r\n modes = []\r\n if dependency =='iso':\r\n nmodesx = 1\r\n modes.append(1)\r\n elif dependency =='indv':\r\n if iopsysx == 0:\r\n nmodesx = len(sn)*len(sb[0][0]) * 2\r\n pass\r\n elif iopsysx == 1:\r\n nmodesx = len(sn)*len(sb[0][0])\r\n pass\r\n else: return -1\r\n for i in range(nmodesx):\r\n modes.append(i+1)\r\n\r\n \r\n f.writelines('** Slip and Twinning modes(system) information \\n')\r\n f.writelines( str(len(modes)).ljust(4) + ' nmodesx \\n')\r\n f.writelines( str(len(modes)).ljust(4) + ' nmodes \\n') \r\n for i in range(len(modes)):\r\n f.writelines(str(modes[i]).ljust(3) )\r\n f.writelines(' modes \\n')\r\n\r\n if dependency == 'iso' :\r\n \r\n \"\"\" iso case\"\"\"\r\n nsmx = len(sn)* len(sb[0][0])\r\n if iopsysx == 0: nsmx = nsmx * 2\r\n \r\n f.writelines(' {111}<110> slip \\n')\r\n f.writelines('%5i %5i %5i %5i' %(1, nsmx, nrsx, iopsysx))\r\n f.writelines(' modex, nsmx, nrsx, oiopsysx \\n')\r\n f.writelines(' %5.3f %1i %5.3f %5.3f ' %(twshx,isectw,thres1,thres2))\r\n f.writelines(' twshx,isectw,thres1,thres2 \\n')\r\n f.writelines(' %5.3e %5.3e %5.3e %5.3e %5.1f %5.1f \\n' %(tau0, tau1, thet0, thet1, hpfac, gndfac))\r\n f.writelines(' 1.0 h_latent \\n')\r\n for i in range(len(sn)):\r\n for j in range(len(sb[i])):\r\n if iopsysx == 1:\r\n f.writelines(' %2i %2i %2i %2i %2i %2i \\n' %(sn[i][0], sn[i][1], sn[i][2],\r\n sb[i][j][0], sb[i][j][1], sb[i][j][2]))\r\n elif iopsysx == 0:\r\n f.writelines(' %2i %2i %2i %2i %2i %2i \\n' %(sn[i][0], sn[i][1], sn[i][2],\r\n sb[i][j][0], sb[i][j][1], sb[i][j][2]))\r\n f.writelines(' %2i 
%2i %2i %2i %2i %2i \\n' %(sn[i][0], sn[i][1], sn[i][2],\r\n -sb[i][j][0],-sb[i][j][1],-sb[i][j][2]))\r\n\r\n if dependency == 'indv':\r\n \"\"\" individual crss evolution for each slip system \"\"\"\r\n nsmx = 1\r\n\r\n # interaction:\r\n # 'Bauschinger'\r\n # 'Latent'\r\n # 'Bauschinger Latent'\r\n hmat = []\r\n if iopsysx == 1: m=1\r\n elif iopsysx == 0: m=2\r\n if iopsysx ==1:\r\n for i in range (len(sn)*len(sb[0])*m):\r\n if interaction =='Latent':\r\n hmat.append(hij)\r\n pass\r\n elif interaction =='Bauschinger Latent':\r\n hmat.append(hij)\r\n pass\r\n elif interaction =='Bauschinger':\r\n hmat.append(hii)\r\n pass\r\n kount = 0\r\n for j in range(len(sn)):\r\n for k in range(len(sb[j])):\r\n if iopsysx == 1: #'Bauschinger impossible'\r\n f.writelines(' (%2i%2i%2i) <%2i%2i%2i> system \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n sb[j][k][0], sb[j][k][1], sb[j][k][2]))\r\n\r\n f.writelines('%5i %5i %5i %5i' %(kount+1, nsmx, nrsx, iopsysx))\r\n f.writelines(' modex, nsmx, nrsx, oiopsysx \\n')\r\n f.writelines(' %5.3f %1i %5.3f %5.3f ' %(twshx,isectw,thres1,thres2))\r\n f.writelines(' twshx,isectw,thres1,thres2 \\n')\r\n f.writelines(' %5.3e %5.3e %5.3e %5.3e %5.1f %5.1f\\n'%(tau0, tau1, thet0, thet1, hpfac, gndfac))\r\n #hlat\r\n if interaction =='Bauschinger':\r\n count = 0\r\n for i in range(len(sn)*len(sb[0])*m):\r\n f.writelines(' %5.2f ' %(hmat[count]))\r\n count = count + 1\r\n f.writelines('\\n')\r\n elif interaction =='Latent':\r\n count = 0\r\n for i in range(len(sn)*len(sb[0])*m):\r\n if count == kount:\r\n f.writelines(' %5.2f ' %(hii))\r\n else:\r\n f.writelines(' %5.2f ' %(hmat[count]))\r\n count = count + 1\r\n f.writelines('\\n')\r\n elif interaction =='Bauschinger Latent':\r\n print\r\n print 'Basuchinger Latent mode for iopsysx == 1 is not possible'\r\n print 'Therefore, it has been hardwired to be Latent in this case'\r\n print\r\n count = 0\r\n for i in range(len(sn)*len(sb[0])*m):\r\n if count == kount:\r\n f.writelines(' %5.2f ' %(hii))\r\n else:\r\n f.writelines(' %5.2f ' %(hmat[count]))\r\n count = count + 1\r\n f.writelines('\\n')\r\n \r\n \r\n #index\r\n f.writelines(' %2i %2i %2i %2i %2i %2i \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n sb[j][k][0], sb[j][k][1], sb[j][k][2]))\r\n kount = kount + 1\r\n if iopsysx == 0: #'Bauschinger Latent both possible'\r\n\r\n f.writelines(' (%2i%2i%2i) <%2i%2i%2i> system \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n sb[j][k][0], sb[j][k][1], sb[j][k][2]))\r\n f.writelines('%5i %5i %5i %5i' %(kount+1, nsmx, nrsx, iopsysx))\r\n f.writelines(' modex, nsmx, nrsx, oiopsysx \\n')\r\n f.writelines(' %5.3f %1i %5.3f %5.3f ' %(twshx,isectw,thres1,thres2))\r\n f.writelines(' twshx,isectw,thres1,thres2 \\n')\r\n f.writelines(' %5.3e %5.3e %5.3e %5.3e %5.1f %5.1f \\n' %(tau0, tau1, thet0, thet1, hpfac, gndfac))\r\n #hlat\r\n count = 0\r\n for i in range(len(sn)*len(sb[0])*m):\r\n if kount == count:\r\n f.writelines(' %5.2f ' %(hii))\r\n elif kount+1 == count:\r\n f.writelines(' %5.2f ' %(hb))\r\n else:\r\n f.writelines(' %5.2f ' %(hij))\r\n count = count + 1\r\n f.writelines('\\n')\r\n \r\n #index\r\n f.writelines(' %2i %2i %2i %2i %2i %2i \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n sb[j][k][0], sb[j][k][1], sb[j][k][2]))\r\n kount = kount + 1\r\n\r\n f.writelines(' (%2i%2i%2i) <%2i%2i%2i> system \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n -sb[j][k][0],-sb[j][k][1],-sb[j][k][2]))\r\n f.writelines('%5i %5i %5i %5i' %(kount+1, nsmx, nrsx, iopsysx))\r\n f.writelines(' modex, nsmx, nrsx, oiopsysx \\n')\r\n f.writelines(' %5.3f %1i %5.3f %5.3f ' 
%(twshx,isectw,thres1,thres2))\r\n f.writelines(' twshx,isectw,thres1,thres2 \\n')\r\n f.writelines(' %5.3e %5.3e %5.3e %5.3e %5.1f %5.1f \\n' %(tau0, tau1, thet0, thet1, hpfac, gndfac))\r\n #hlat\r\n count = 0\r\n for i in range(len(sn)*len(sb[0])*m):\r\n if kount == count:\r\n f.writelines(' %5.2f ' %(hii))\r\n elif kount - 1== count:\r\n f.writelines(' %5.2f ' %(hb))\r\n else:\r\n f.writelines(' %5.2f ' %(hij))\r\n count = count + 1\r\n f.writelines('\\n')\r\n #index\r\n f.writelines(' %2i %2i %2i %2i %2i %2i \\n' %(sn[j][0], sn[j][1], sn[j][2],\r\n -sb[j][k][0], -sb[j][k][1], -sb[j][k][2]))\r\n kount = kount + 1\r\n\r\n \r\n if iopsysx == 1: #No directionality in slip direction\r\n pass\r\n elif iopsysx == 0: #Slip system has the directionality\r\n # modes = len(sn) * 2\r\n pass\r\n else :\r\n print\r\n print 'Err: Wrong dependency!'\r\n print\r\n return -1\r\n f.close()\r\n\r\n print\r\n print '***********************************************************'\r\n print 'Creation of the requested single crystal file has completed'\r\n print '***********************************************************'\r\n print", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n 
hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def __init__(self,image,hhr=50,input_channels='rgb'): #revisar\n assert len(image.shape)==3\n\n self.hhr=hhr\n '''\n if input_channels=='rgb':\n self.image=self.rgb_to_bgr(image)\n else:\n '''\n self.image=image\n self.entropy=0\n self.HHR=0\n self.PVM = np.zeros((3,1))\n self.area=0\n self.brightness = 0\n self.color = ('r','g','b')\n self.PVM_12= np.zeros((3,4))", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same 
directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def SetOutputParametersFromImage(self, image: 'itkImageBase3') -> \"void\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICF3_SetOutputParametersFromImage(self, image)", "def load_camera_params( hf, path ):\n\n R = hf[ path.format('R') ][:]\n R = R.T\n\n T = hf[ path.format('T') ][:]\n f = hf[ path.format('f') ][:]\n c = hf[ path.format('c') ][:]\n k = hf[ path.format('k') ][:]\n p = hf[ path.format('p') ][:]\n\n name = hf[ path.format('Name') ][:]\n name = \"\".join( [chr(item) for item in name] )\n\n return R, T, f, c, k, p, name", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def _readHeader(self):\n self._head = fits.getheader(self.fitsfile)\n\n # if self._head['NAXIS'] == 2:\n # # The FITS is an image file\n # self.type = 'image'\n # xkw = ['CTYPE1', 'CRPIX1', 'CRVAL1', 'CDELT1', 'CUNIT1']\n # ykw = ['CTYPE2', 'CRPIX2', 'CRVAL2', 'CDELT2', 'CUNIT2']\n\n # # Check that all keywords are present\n # allkwd = all([k in self._head.keys() for k in xkw + ykw])\n # assert allkwd, \"Fits '{}' is not a standard image.\".format(self._fitsfile)\n\n # # Get a subset of the header dictionnary\n # self.xcoord = {k:v for k, v in self._head.items() if k in xkw}\n # self.ycoord = {k:v for k, v in self._head.items() if k in ykw}\n\n # # Get the data\n # self.data = fits.getdata(self.fitsfile, memmap=True)\n\n # else:\n # self.type = 'other'\n\n try:\n # The FITS is an image file\n self.type = 'image'\n xkw = ['CTYPE1', 'CRPIX1', 'CRVAL1', 'CDELT1', 'CUNIT1']\n ykw = ['CTYPE2', 'CRPIX2', 'CRVAL2', 'CDELT2', 'CUNIT2']\n\n # Check that all keywords are present\n allkwd = all([k in self._head.keys() for k in xkw + ykw])\n assert allkwd, \"Fits '{}' is not a standard image.\".format(self._fitsfile)\n\n # Get a subset of the header dictionnary\n self.xcoord = {k:v for k, v in self._head.items() if k in xkw}\n self.ycoord = {k:v for k, v in self._head.items() if k in ykw}\n\n # Get the data\n self.data = np.squeeze(fits.getdata(self.fitsfile, memmap=True))\n\n except:\n self.type = 'other'\n\n return", "def _gen_header():\n\n shared_fields = [\n ('nx', numpy.int32),\n ('ny', numpy.int32),\n ('nz', numpy.int32),\n ('mode', numpy.int32),\n ('nxstart', numpy.int32),\n ('nystart', numpy.int32),\n ('nzstart', numpy.int32),\n ('mx', numpy.int32),\n ('my', numpy.int32),\n ('mz', numpy.int32),\n ('xlen', numpy.float32),\n ('ylen', numpy.float32),\n ('zlen', numpy.float32),\n ('alpha', numpy.float32), # defocus\n ('beta', numpy.float32), # astig_ang\n ('gamma', numpy.float32), # astig_mag\n ('mapc', numpy.int32),\n ('mapr', numpy.int32),\n ('maps', numpy.int32),\n ('amin', numpy.float32),\n ('amax', numpy.float32),\n ('amean', numpy.float32),\n ('ispg', numpy.int32),\n ('nsymbt', numpy.int32),\n ]\n\n header_image_dtype = numpy.dtype(shared_fields + [\n ('extra', 
'S100'),\n ('xorigin', numpy.float32), # 208 320 4 char cmap; Contains \"MAP \"\n ('yorigin', numpy.float32),\n ('zorigin', numpy.float32),\n ('map', 'S4'),\n ('byteorder', numpy.int32),\n ('rms', numpy.float32),\n ('nlabels', numpy.int32),\n ('label0', 'S80'),\n ('label1', 'S80'), # Image Type\n # Allow image `type`s are:\n # - Power Spectra: 'P'\n # - Windowed Particle: 'W'\n # - Micrograph: 'M'\n ('label2', 'S80'),\n ('label3', 'S80'),\n ('label4', 'S80'),\n ('label5', 'S80'),\n ('label6', 'S80'),\n ('label7', 'S80'),\n ('label8', 'S80'),\n ('label9', 'S80'),\n ])\n\n return header_image_dtype", "def readIDCCoeffs(self, header):\n coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale',\n 'idcv2ref', 'idcv3ref', 'idctheta']\n for c in coeffs:\n self.__setattr__(c, header.get(c, None))", "def SetOutputParametersFromImage(self, image: 'itkImageBase2') -> \"void\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICF2_SetOutputParametersFromImage(self, image)", "def SetOutputParametersFromImage(self, image: 'itkImageBase3') -> \"void\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF23_SetOutputParametersFromImage(self, image)", "def load_camera_params(hf, path):\n\n R = hf[path.format('R')][:]\n R = R.T\n\n T = hf[path.format('T')][:]\n f = hf[path.format('f')][:]\n c = hf[path.format('c')][:]\n k = hf[path.format('k')][:]\n p = hf[path.format('p')][:]\n\n name = hf[path.format('Name')][:]\n name = \"\".join([chr(int(item[0])) for item in name])\n\n return R, T, f, c, k, p, name", "def SetOutputParametersFromImage(self, image: 'itkImageBase2') -> \"void\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF32_SetOutputParametersFromImage(self, image)", "def failover_cbf(cbf_file):\n\n header = {}\n\n header[\"two_theta\"] = 0.0\n\n for record in open(cbf_file):\n\n if \"_array_data.data\" in record:\n break\n\n if \"PILATUS 2M\" in record:\n header[\"detector_class\"] = \"pilatus 2M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (1679, 1475)\n continue\n\n if \"PILATUS3 2M\" in record:\n header[\"detector_class\"] = \"pilatus 2M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (1679, 1475)\n continue\n\n if \"PILATUS 6M\" in record:\n header[\"detector_class\"] = \"pilatus 6M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (2527, 2463)\n continue\n\n if \"PILATUS3 6M\" in record:\n header[\"detector_class\"] = \"pilatus 6M\"\n header[\"detector\"] = \"dectris\"\n header[\"size\"] = (2527, 2463)\n continue\n\n if \"Start_angle\" in record:\n header[\"phi_start\"] = float(record.split()[-2])\n continue\n\n if \"Angle_increment\" in record:\n header[\"phi_width\"] = float(record.split()[-2])\n continue\n\n if \"Exposure_period\" in record:\n header[\"exposure_time\"] = float(record.split()[-2])\n continue\n\n if \"Silicon sensor\" in record:\n header[\"sensor\"] = 1000 * float(record.split()[4])\n continue\n\n if \"Count_cutoff\" in record:\n header[\"saturation\"] = int(record.split()[2])\n continue\n\n if \"Detector_distance\" in record:\n header[\"distance\"] = 1000 * float(record.split()[2])\n continue\n\n if \"Wavelength\" in record:\n header[\"wavelength\"] = float(record.split()[-2])\n continue\n\n if \"Pixel_size\" in record:\n header[\"pixel\"] = (\n 1000 * float(record.split()[2]),\n 1000 * float(record.split()[5]),\n )\n continue\n\n if \"Beam_xy\" in record:\n\n # N.B. 
this is swapped again for historical reasons\n\n beam_pixels = map(\n float,\n record.replace(\"(\", \"\").replace(\")\", \"\").replace(\",\", \"\").split()[2:4],\n )\n header[\"beam\"] = (\n beam_pixels[1] * header[\"pixel\"][1],\n beam_pixels[0] * header[\"pixel\"][0],\n )\n header[\"raw_beam\"] = (\n beam_pixels[1] * header[\"pixel\"][1],\n beam_pixels[0] * header[\"pixel\"][0],\n )\n continue\n\n # try to get the date etc. literally.\n\n try:\n datestring = record.split()[-1].split(\".\")[0]\n format = \"%Y-%b-%dT%H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n try:\n\n if not \"date\" in header:\n datestring = record.split()[-1].split(\".\")[0]\n format = \"%Y-%m-%dT%H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n try:\n\n if not \"date\" in header:\n datestring = record.replace(\"#\", \"\").strip().split(\".\")[0]\n format = \"%Y/%b/%d %H:%M:%S\"\n struct_time = time.strptime(datestring, format)\n header[\"date\"] = time.asctime(struct_time)\n header[\"epoch\"] = time.mktime(struct_time)\n\n except Exception:\n pass\n\n header[\"phi_end\"] = header[\"phi_start\"] + header[\"phi_width\"]\n\n return header" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all occurrences of val in list lo. Returns a list of indices of val in lo.
def findall(lo,val):
    u = []
    i = -1
    while( i < len(lo)-1):
        try:
            i = lo.index(val,i+1)
            u.append(i)
        except:
            i += 1
    return u
[ "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def __indexs(arr, val):\n indexs = []\n for i in range(0, len(arr)):\n if arr[i] == val:\n indexs.append(i)\n return indexs if len(indexs) is not 0 else None", "def locate_all_occurrence(l, e):\n return [i for i, x in enumerate(l) if x == e]", "def search_all(self, node_val):\n ans = []\n for i, node in enumerate(self.nodes):\n if node.val == node_val:\n ans.append(i + 1)\n return ans", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def all_indices(haystack, needle):\n index = 0\n indices = list()\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n indices.append(i)\n index = i+1\n return indices", "def linear_search(v, l):\n i = 0\n for value in l:\n if value == v:\n return i\n i += 1\n return len(l)", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def indexMultiple(x,value):\n return [ i[0] for i in enumerate(x) if i[1] == value ]", "def find_all_indices(elements, value, key=lambda x: x):\r\n left = find_leftmost_index(elements, value, key)\r\n right = find_rightmost_index(elements, value, key)\r\n if left and right:\r\n return set(range(left, right+1))\r\n return set()", "def list_indices(self):", "def findall(haystack, needle, idx=0):\n offsets = []\n while idx > -1:\n idx = haystack.find(needle, idx)\n if idx > -1:\n offsets.append(idx)\n idx += 1\n return offsets", "def common_inds(arrs):\n\tvals = common_vals(arrs)\n\treturn [find(arr, vals) for arr in arrs]", "def get_indices(num_list, target):\n\n for idx, num in enumerate(num_list):\n diff = target - num\n if diff in num_list and num_list.index(diff) != idx:\n return [idx, num_list.index(diff)]\n return (\"Cannot find target\")", "def returnIndexes(labels):\n indexes = []\n for label in labels:\n label = label.replace(\"L\",\"\").replace(\":\",\"\")\n try:\n label = int(label)\n except ValueError:\n continue \n indexes.append(label)\n return indexes", "def get_all_indices(L):\n all_indices = jnp.transpose(jnp.array(jnp.meshgrid(jnp.arange(L, dtype=jnp.int32), jnp.arange(L, dtype=jnp.int32)))).reshape(-1, 2)\n return all_indices", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the overall 3D rotation (phi, theta, psi) between two sets of Eulerian angles. The two sets have to be of the same length, and it is assumed that the k'th element of the first list corresponds to the k'th element of the second list.
def rotation_between_anglesets(agls1, agls2):
    from math import sin, cos, pi, sqrt, atan2, acos, atan
    from numpy import array, linalg, matrix
    import types

    deg2rad = pi/180.0

    def ori2xyz(ori):
        if(type(ori) == types.ListType):
            phi, theta, psi = ori[:3]
        else:
            # it has to be Transformation object
            d = ori.get_params("spider")
            phi = d["phi"]
            theta = d["theta"]
            psi = d["psi"]
        """
        # This makes no sense here! PAP 09/2011
        if theta > 90.0:
            phi += 180.0
            theta = 180.0-theta
        """
        phi *= deg2rad
        theta *= deg2rad
        x = sin(theta) * sin(phi)
        y = sin(theta) * cos(phi)
        z = cos(theta)
        return [x, y, z]

    N = len(agls1)
    if N != len(agls2):
        print 'Both lists must have the same length'
        return -1
    if N < 2:
        print 'At least two orientations are required in each list'
        return -1

    U1, U2 = [], []
    for n in xrange(N):
        p1 = ori2xyz(agls1[n])
        p2 = ori2xyz(agls2[n])
        U1.append(p1)
        U2.append(p2)

    # compute all Suv with uv = {xx, xy, xz, yx, ..., zz}
    Suv = [0] * 9
    c = 0
    nbori = len(U1)
    for i in xrange(3):
        for j in xrange(3):
            for s in xrange(nbori):
                Suv[c] += (U2[s][i] * U1[s][j])
            c += 1

    # create matrix N
    N = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]],
               [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]],
               [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],
               [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])

    # eigenvector corresponding to the most positive eigenvalue
    val, vec = linalg.eig(N)
    q0, qx, qy, qz = vec[:, val.argmax()]

    # create quaternion Rot matrix
    r = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,
         2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,
         2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]
    R = Transform(r)
    dictR = R.get_rotation('SPIDER')
    return dictR['phi'], dictR['theta'], dictR['psi']
[ "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! 
[Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. '''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def check_angles(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicAngleForce, \"Error: forces must be HarmonicAngleForces\"\n\n n_angles0 = force0.getNumAngles()\n n_angles1 = force1.getNumAngles()\n\n dict0, dict1 = {}, {}\n\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(0)\n #unit_theta = theta0.unit\n unit_theta = u.degrees\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.degrees)**2\n\n for k in range(n_angles0):\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict0[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n for k in range(n_angles1):\n i0, i1, i2, theta0, k0 = force1.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict1[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Angles0 - Angles1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Angles1 - Angles0 = %s\" % (keys1.difference(keys0)))\n diff_keys = keys0.symmetric_difference(keys1)\n assert diff_keys == set(), \"Systems have different HarmonicAngleForce entries: extra keys are: \\n%s\" % diff_keys\n\n for k, parameter_name in enumerate([\"theta0\", \"k0\"]):\n for (i0, i1, i2) in dict0.keys():\n val0 = dict0[i0, i1, i2][k]\n val1 = dict1[i0, i1, i2][k]\n if parameter_name=='theta0':\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has angle values of %f and %f degrees, respectively.\" % (i0, i1, i2, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has force constant values of %f and %f kJ/(mol degree**2), respectively.\" % (i0, i1, i2, val0, val1)", "def test_rotation_against_quat(self):\n v1 = np.random.rand(3)\n v2 = np.random.rand(3)\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n\n rot = helpers.get_rotation_matrix(v1, v2)\n vector, angle 
= helpers.get_rotation_vector_and_angle(v1, v2)\n quat = Quaternion(vector=vector, angle=angle).unit()\n rot_quat = quat.basis()\n for x_row, y_row in zip(rot, rot_quat):\n for a, b in zip(x_row, y_row):\n self.assertAlmostEqual(a, b)\n # v2_bis = rot@v1.T\n # v2_tris = rot_quat@v1.T", "def get_rotation_plane(f1, f2):\n [pt1, pt2] = get_common_verts(f1.vertices, f2.vertices)\n v1 = pt1 - pt2\n v2 = pt2 - f1.face_center\n v3 = pt1 - f2.face_center\n v4 = np.random.uniform(size=4)\n # Check if resulting matrix is full-rank as expected.\n # Taken from: https://stackoverflow.com/a/13252541/1826912\n a = np.array([v1, v2, v3, v4])\n if np.linalg.cond(a) > 1e5:\n print(\"The vectors aren't independent\\\n as we were expecting!\")\n breakpoint()\n e = four_vec_gram_schmidt(v1, v2, v3, v4)\n u4 = e[3]\n pt3 = pt1 + u4\n return pt1, pt2, pt3", "def spherical_coord_align_rotation(v1, v2):\n a = unit_vector(v1)\n b = unit_vector(v2)\n a[2] = b[2] = 0\n azimuth = angle(a, b, deg=False, ref_plane=np.array([0, 0, 1]))\n\n assert -np.pi <= azimuth <= np.pi\n xyrot = zrotate(azimuth, deg=False)[:3, :3]\n vs = v1.dot(xyrot.T)\n elaxis = np.cross(vs, v2)\n elevation = angle(vs, v2, deg=False, ref_plane=elaxis)\n\n assert -np.pi <= elevation <= np.pi\n elrot = rotation_matrix(elevation, elaxis, deg=False)[:3, :3]\n Q = elrot.dot(xyrot)\n assert np.allclose(la.det(Q), 1.0)\n return Q", "def solve_r3_rotation_for_angles_given_axes(\n R, e1, e2, e3, smaller_phi2_solution=True, return_both_solutions=False, deg=False\n):\n\n assert R.is_r3_rotation_matrix()\n e1 = matrix.col(e1).normalize()\n e2 = matrix.col(e2).normalize()\n e3 = matrix.col(e3).normalize()\n # Fail if e2 & e3 are parallel\n e2xe3 = e2.cross(e3)\n if e2xe3.length_sq() < 1.0e-6:\n return None\n # Make a unit test vector\n u = e2xe3.normalize()\n e1e2 = e1.dot(e2)\n e1e3 = e1.dot(e3)\n e2e3 = e2.dot(e3)\n e1e2e3 = e1.dot(e2.cross(e3))\n Re3 = R * e3\n e1Re3 = e1.dot(Re3)\n # ** Step 1 ** Calculation of phi2 (Bricogne equation (4))\n # e1.(R e3) = (e1.e2)(e2.e3) + {(e1.e3) - (e1.e2)(e2.e3)} cos(phi2)\n # + (e1.e2 x e3) sin(phi2)\n # The coefficients of cos & sin phi2\n cc = e1e3 - e1e2 * e2e3\n ss = e1e2e3\n # Fail if both are zero (indeterminate)\n if abs(cc) < 1.0e-6 and abs(ss) < 1.0e-6:\n return None\n norm = math.sqrt(cc * cc + ss * ss)\n rhs = (e1Re3 - e1e2 * e2e3) / norm\n # abs(rhs) should not be greater than 1.0, allowing a small tolerance\n if abs(rhs) > 1.000002:\n return None\n if rhs > 1.0:\n rhs = 1.0\n elif rhs < -1.0:\n rhs = -1.0\n cc /= norm\n ss /= norm\n # Solve rhs = cos(phi2) * cc + sin(phi2) * ss\n # using cos(a-b) = cos(a) cos(b) + sin(a) sin(b)\n # where b = phi2\n a = math.atan2(ss, cc)\n amb = math.acos(rhs)\n # Two solutions in range -pi to +pi\n # Note that if e1 == e3, ss = 0, a = 0 & phi2b = -phi2a\n phi2a = a - amb\n if phi2a > math.pi:\n phi2a -= 2.0 * math.pi\n elif phi2a < -math.pi:\n phi2a += 2.0 * math.pi\n phi2b = a + amb\n if phi2b > math.pi:\n phi2b -= 2.0 * math.pi\n elif phi2b < -math.pi:\n phi2b += 2.0 * math.pi\n if return_both_solutions:\n phi2_ = (phi2a, phi2b)\n elif smaller_phi2_solution:\n if abs(phi2a) < abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n else:\n if abs(phi2a) > abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n solutions = []\n for phi2 in phi2_:\n # ** Step 2 ** Calculation of phi1\n R2 = e2.axis_and_angle_as_r3_rotation_matrix(phi2, deg=False)\n R2inv = R2.transpose()\n v = R2 * e3\n w = Re3\n v1 = v - (v.dot(e1)) * e1\n w1 = w - (w.dot(e1)) * e1\n norm = 
v1.dot(v1) * w1.dot(w1)\n # If norm = 0, rotations 1 & 3 are around same axis (for this phi2),\n # so any value for phi1 is OK\n if norm > 1.0e-8:\n norm = math.sqrt(norm)\n # cos(phi1) = (v1.w1)/norm\n # sin(phi1) = (v1.w1 x e1)/norm\n phi1 = math.atan2(v1.dot(w1.cross(e1)) / norm, v1.dot(w1) / norm)\n if phi1 > math.pi:\n phi1 -= 2.0 * math.pi\n if phi1 < -math.pi:\n phi1 += 2.0 * math.pi\n else:\n phi1 = 0.0\n # ** Step 3 ** Calculation of phi3\n R1inv = e1.axis_and_angle_as_r3_rotation_matrix(-1.0 * phi1, deg=False)\n R3 = R2inv * R1inv * R\n R3u = R3 * u\n # sin(phi3) = u.R3u x e3\n # cos(phi3) = u.R3u\n phi3 = math.atan2(u.dot(R3u.cross(e3)), u.dot(R3u))\n if deg:\n phi1, phi2, phi3 = tuple([x * 180 / math.pi for x in (phi1, phi2, phi3)])\n solutions.append((phi1, phi2, phi3))\n\n if return_both_solutions:\n return solutions\n else:\n return solutions[0]", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def find_rotation_and_translation(source_points, dest_points):\n source_points = np.array(source_points)\n dest_points = np.array(dest_points)\n for rotation in ROTATIONS:\n source_points_rotated = rotation.dot(source_points.T).T\n translations = dest_points - source_points_rotated\n if np.all(translations == translations[0]):\n return rotation, translations[0]", "def get_phi_kappa_omega(self, angles):\n (phi) = angles[0]\n (kappa) = angles[1]\n (omega) = angles[2]\n return (phi, kappa, omega)", "def rotate(self, ra1, dec1, ra2, dec2, ra3, dec3):\n # Turns Right Ascension/Declination into Azimuth/Zenith for healpy\n phi1 = ra1 - np.pi\n zen1 = np.pi/2. - dec1\n phi2 = ra2 - np.pi\n zen2 = np.pi/2. - dec2\n phi3 = ra3 - np.pi\n zen3 = np.pi/2. - dec3\n\n # Rotate each ra1 and dec1 towards the pole?\n x = np.array([hp.rotator.rotateDirection(\n hp.rotator.get_rotation_matrix((dp, -dz, 0.))[0], z, p)\n for z, p, dz, dp in zip(zen1, phi1, zen2, phi2)])\n\n # Rotate **all** these vectors towards ra3, dec3 (source_path)\n zen, phi = hp.rotator.rotateDirection(np.dot(\n hp.rotator.get_rotation_matrix((-phi3, 0, 0))[0],\n hp.rotator.get_rotation_matrix((0, zen3, 0.))[0]), x[:, 0], x[:, 1])\n\n dec = np.pi/2. 
- zen\n ra = phi + np.pi\n return np.atleast_1d(ra), np.atleast_1d(dec)", "def rotated_intersections(self):\n slices = self.find_active_intersections()\n rotation_angle = slices[0]\n slices = slices - rotation_angle\n\n return rotation_angle, slices + (slices < 0)*2.*np.pi", "def _get_rotation(self, p1, p2):\n delta = p2 - p1\n l = np.linalg.norm(delta)\n return np.array([[delta[1] / l, -delta[0] / l], [delta[0] / l, delta[1] / l]])", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def shear_rot(g1,g2,phi2=0):\n cos2 = np.cos(phi2)\n sin2 = np.sin(phi2)\n g1rot = cos2 * g1 - sin2 * g2 #from rotation x'=-(cos(2*theta)*x+sin(2*theta)*y)\n g2rot = sin2 * g1 + cos2 * g2\n return g1rot, g2rot", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def get_optimal_rotation_and_translation(x1, x2):\n t = get_optimal_translation(x1, x2)\n x1 = x1 - jnp.mean(x1, axis=0)\n x2 = x2 - jnp.mean(x2, axis=0)\n return get_optimal_rotation(x1, x2), t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve pixel size from the header. We check the attribute Pixel_size and also the pixel size from the ctf object, if it exists. If the pixel size is not set, print a warning and return -1.0; if the two values conflict, print a warning and return the larger of the two.
def get_pixel_size(img):
    p1 = img.get_attr_default("apix_x", -1.0)
    cc = img.get_attr_default("ctf", None)
    if cc == None:
        p2 = -1.0
    else:
        p2 = round(cc.apix, 3)
    if p1 == -1.0 and p2 == -1.0:
        ERROR("Pixel size not set", "get_pixel_size", 0)
        return -1.0
    elif p1 > -1.0 and p2 > -1.0:
        if abs(p1 - p2) >= 0.001:
            ERROR("Conflict between pixel size in attribute and in ctf object", "get_pixel_size", 0)
        # pixel size is positive, so what follows omits -1 problem
        return max(p1, p2)
    else:
        return max(p1, p2)
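As a rough usage sketch (the stand-in image class below is hypothetical, and ERROR() is the module-level helper already referenced above), the reconciliation rule behaves like this:

class _FakeCtf:
    apix = 1.062

class _FakeImg:
    # hypothetical stand-in exposing only get_attr_default(), enough to exercise the logic
    def __init__(self, attrs):
        self._attrs = attrs
    def get_attr_default(self, name, default):
        return self._attrs.get(name, default)

print(get_pixel_size(_FakeImg({"apix_x": 1.060, "ctf": _FakeCtf()})))  # conflict warning, then 1.062
print(get_pixel_size(_FakeImg({})))                                    # "Pixel size not set", then -1.0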
[ "def get_image_size(self):", "def GetSizeCX(self):\n ...", "def getSizePix(self):\r\n if 'sizePix' in self.currentCalib:\r\n return self.currentCalib['sizePix']\r\n else:\r\n return None", "def get_size(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n size = file_meta_plist['$objects'][1]['Size']\n return size\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['size']", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def _getSize(extra_fields) -> int:\n # Load BGZF required BC field\n BC = None\n for field in extra_fields:\n if field.tag == b'BC':\n BC = field.data\n break\n if BC:\n size = int.from_bytes(BC, byteorder='little', signed=False) + 1 # type: int\n if size is not None:\n return size\n raise InvalidBGZF(\"Missing block size (BC) extra field in bgzf header.\")", "def get_pix_size(self):\n aperture_dict = self.get_aperture_pars()\n pix_size = aperture_dict['pix']\n return pix_size", "def getSize(self):\n outSize = float2()\n _res = self.mAPIContext.SDGraphObjectFrame_getSize(self.mHandle, ctypes.byref(outSize))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return outSize", "def getPixelSize(self):\n return _yarp.ImageRgbFloat_getPixelSize(self)", "def getPixelSize(self):\n return _yarp.ImageFloat_getPixelSize(self)", "def GetSize(self) -> \"itkSize2 const &\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF32_GetSize(self)", "def __header_size(self):\n return self.SIZE_LINEUPS + self.SIZE_PLAYERS_PER_LINEUP", "def GetSize(self) -> \"itkSize3 const &\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF43_GetSize(self)", "def byte_size(self) -> int:\n return pixel_formats[self._dtype][3] * self._components * self.width * self.height", "def readSizeF(self):\n pass", "def GetSize(self) -> \"itkSize2 const &\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF42_GetSize(self)", "def GetSize(self) -> \"itkSize2 const &\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF22_GetSize(self)", "def numPixels(self):\n\t\treturn self.size", "def GetSize(self) -> \"itkSize3 const &\":\n return _itkGenerateImageSourcePython.itkGenerateImageSourceICVF23_GetSize(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For the given grouping, convert ROOT files into DataFrames merging groups together. Return a dictionary mapping file names to DataFrames.
def process_group(directory: str, files: dict, channel: str, year: str) -> dict:
    if len(files) == 0:
        raise Exception('empty file list for directory {}'.format(directory))

    dataframes = {}
    for name, ifile in files.items():
        # equivalent of hadding
        update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')
        current_dfs = []
        for update_df in update_dfs:
            update_df.fillna(-999, inplace=True)
            current_dfs.append(update_df)

        if len(current_dfs) > 0:
            dataframes[name] = pd.concat(current_dfs)

    dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})
    return dataframes
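A hypothetical call, purely for illustration; the paths, sample name, channel and year below are assumptions, and the ROOT file must contain an 'mt_tree' readable by uproot:

grouped = process_group(
    'ntuples/2018/embedded',                           # directory, only used in the error message
    {'embedded': 'ntuples/2018/embedded/embed.root'},  # output name -> ROOT file
    'mt',
    '2018',
)
print(grouped['metadata'])        # one-row DataFrame with channel='mt', year='2018'
print(len(grouped['embedded']))   # merged events from every chunk of mt_tree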
[ "def _load_group_data(directory='', file_name='', df=True):\n\n # check if folder exists with experiment name\n if os.path.isdir(directory) is False:\n print 'making new directory to save data'\n os.mkdir(directory)\n \n # all files in directory\n files = os.listdir(directory)\n\n # if data file already exists\n if file_name in files:\n print 'group data found:', file_name\n\n # if data stored as pandas dataframe\n if df:\n # load data\n print directory+file_name\n group_data = pd.read_pickle(directory+file_name)\n print 'group data loaded'\n\n # if stored as dictionary\n else:\n # load data\n with open(directory+file_name, 'rb') as pkl_file:\n group_data= pickle.load(pkl_file)\n print 'group data loaded'\n\n # otherwise create data structure\n else:\n # data organized as {frequency}{syn distance}{number of synapses}{polarity}[trial]{data type}{tree}[section][segment][spikes]\n print 'no group data found'\n if df:\n group_data = pd.DataFrame()\n else:\n group_data= {}\n\n return group_data", "def split(df, group):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby(group)\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def get_data(paths, df_names, categorical_feats, groupby=None, exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. 
Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)", "def _load_group_data(directory='', file_name=''):\n \n # all files in directory\n files = os.listdir(directory)\n \n # if data file already exists\n if file_name in files:\n # load data\n print 'group data found:', file_name\n with open(directory+file_name, 'rb') as pkl_file:\n group_data= pickle.load(pkl_file)\n print 'group data loaded'\n # otherwise create data structure\n else:\n # data organized as {frequency}{syn distance}{number of synapses}{polarity}[trial]{data type}{tree}[section][segment][spikes]\n print 'no group data found'\n group_data= {}\n\n return group_data", "def _load_group_data(self, **kwargs):\n\n # identify data folder\n data_folder = 'Data/'+kwargs['experiment']+'/'\n \n # all files in directory\n files = os.listdir(data_folder)\n\n save_string = kwargs['save_string']\n # if data file already exists\n if save_string in files:\n # load data\n print 'group data found:', save_string\n with open(data_folder+save_string, 'rb') as pkl_file:\n group_data= pickle.load(pkl_file)\n print 'group data loaded'\n # otherwise create data structure\n else:\n # data organized as {frequency}{syn distance}{number of synapses}{polarity}[trial]{data type}{tree}[section][segment][spikes]\n print 'no group data found'\n group_data= {}\n\n return group_data", "def get_data(self, pattern):\n rootgrps = list()\n for year, sorted_files in self.sorted_files.items():\n print('Opening %s' % os.path.join(self.raw_data_path, '%d' % year,\n pattern))\n for j, file_name in enumerate(sorted_files):\n if fnmatch.fnmatch(file_name, pattern):\n rootgrps.append(Dataset(os.path.join(self.raw_data_path,\n '%d' % year,\n file_name)))\n return rootgrps", "def generate_group_summary_table(self, groups, group_names=None):\n output = {\n 'patient_patches': {},\n 'slide_patches': {},\n 'patient_slides': {},\n }\n groups['chunks'].sort(key=lambda chunk: chunk['id'])\n category_names = sorted([c.name for c in self.CategoryEnum])\n cum_header = 'Overall' if self.is_binary else 'Total'\n headers = category_names + [cum_header]\n num_headers = len(headers)\n group_patches = pd.DataFrame(columns=headers)\n group_slides = pd.DataFrame(columns=headers)\n group_patients = pd.DataFrame(columns=headers)\n for chunk in groups['chunks']:\n try:\n group_name = group_names[chunk['id']]\n except (TypeError, KeyError):\n group_name = f\"Group {chunk['id'] + 1}\"\n patch_paths = chunk['imgs']\n patches = {name: set() for name in category_names}\n slides = {name: set() for name in category_names}\n patients = {name: set() for name in category_names}\n all_patches = set()\n all_slides = set()\n all_patients = set()\n patient_patches = pd.DataFrame(columns=headers)\n slide_patches = pd.DataFrame(columns=headers)\n patient_slides = pd.DataFrame(columns=headers)\n for patch_path in patch_paths:\n patch_id = utils.create_patch_id(patch_path, self.patch_pattern)\n label = 
utils.get_label_by_patch_id(patch_id, self.patch_pattern,\n self.CategoryEnum, is_binary=self.is_binary).name\n slide_name = utils.get_slide_by_patch_id(patch_id, self.patch_pattern)\n patient_id = utils.get_patient_by_slide_id(slide_name,\n dataset_origin=self.dataset_origin)\n\n patches[label].add(patch_id)\n\n if slide_name not in slides[label]:\n if patient_id not in patient_slides.index:\n patient_slides.loc[patient_id] = [0] * num_headers\n patient_slides.at[patient_id, label] += 1\n if slide_name not in all_slides:\n patient_slides.at[patient_id, cum_header] += 1\n \n slides[label].add(slide_name)\n patients[label].add(patient_id)\n\n if patient_id not in patient_patches.index:\n patient_patches.loc[patient_id] = [0] * num_headers\n patient_patches.at[patient_id, label] += 1\n patient_patches.at[patient_id, cum_header] += 1\n\n if slide_name not in slide_patches.index:\n slide_patches.loc[slide_name] = [0] * num_headers\n slide_patches.at[slide_name, label] += 1\n slide_patches.at[slide_name, cum_header] += 1\n\n all_patches.add(patch_id)\n all_slides.add(slide_name)\n all_patients.add(patient_id)\n\n for label, s in patches.items():\n group_patches.at[group_name, label] = len(s)\n group_patches.at[group_name, cum_header] = len(all_patches)\n for label, s in slides.items():\n group_slides.at[group_name, label] = len(s)\n group_slides.at[group_name, cum_header] = len(all_slides)\n for label, s in patients.items():\n group_patients.at[group_name, label] = len(s)\n group_patients.at[group_name, cum_header] = len(all_patients)\n\n patient_patches.loc[\"Total\"] = patient_patches.sum().astype(int)\n slide_patches.loc[\"Total\"] = slide_patches.sum().astype(int)\n patient_slides.loc[\"Total\"] = patient_slides.sum().astype(int)\n output['patient_patches'][group_name] = patient_patches\n output['slide_patches'][group_name] = slide_patches\n output['patient_slides'][group_name] = patient_slides\n \n group_patches.loc['Total'] = group_patches.sum().astype(int)\n group_slides.loc['Total'] = group_slides.sum().astype(int)\n group_patients.loc['Total'] = group_patients.sum().astype(int)\n output['group_patches'] = group_patches\n output['group_slides'] = group_slides\n output['group_patients'] = group_patients\n return output", "def simple_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n # make sure this object has been .fit()\n self._update()\n\n groups = self._parse_groups_file(group_labels_file=group_labels_file)\n patids = self._get_subject_ids(subjid_pat=subjid_pat)\n\n loads = self._load_loadings()\n\n # build the raw loadings table\n df = build_raw_loadings_table(loads, patids)\n df = add_groups_to_loadings_table(df, groups)\n return df", "def _call_data_objects_from_file_group(file_group, directory):\n call_data_object_list = []\n '''\n after reviewing call data files, assume each annual period will always come in file pairs\n '''\n file_1_w_directory = directory + \"\\\\\" + file_group[0]\n file_2_w_directory = directory + \"\\\\\" + file_group[1]\n with open(file_1_w_directory, mode='r') as call_data_file_1, open(file_2_w_directory, mode='r') as call_data_file_2:\n call_data_reader_1 = csv.reader(call_data_file_1, delimiter=\"\\t\")\n call_data_reader_2 = csv.reader(call_data_file_2, delimiter=\"\\t\")\n row_num = 0\n for row_r1, row_r2 in zip(call_data_reader_1, call_data_reader_2):\n if 
row_num == 0:\n header_r1 = row_r1\n header_r2 = row_r2\n id_header_index_dict = _create_id_header_index_dictionary(header_r1)\n field_header_index_dict_r1 = _create_field_header_index_dictionary(header_r1)\n field_header_index_dict_r2 = _create_field_header_index_dictionary(header_r2)\n elif row_num > 1:\n _build_call_data_object_list(\n call_data_object_list,\n id_header_index_dict,\n (field_header_index_dict_r1, field_header_index_dict_r2),\n (row_r1, row_r2),\n row_num\n )\n\n row_num += 1\n\n return call_data_object_list", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups", "def get_group_tables(self, group_name: str) -> Dict[str, Table]:\n if group_name in self._tables_cache:\n return self._tables_cache[group_name]\n with open(os.path.join(self.base_path, \"groups\", f\"{group_name}.json\")) as fp:\n tables = json.load(fp)\n\n name_to_table: Dict[str, Table] = {}\n for table in tables:\n name_to_table[table[\"title\"]] = parse_table(table)\n\n return name_to_table", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def __find_root_dataset_fc(self):\n\n self._data_dir = None\n for datadir in self._DATADIRS:\n datadir = os.path.abspath(os.path.join(self._pathname, datadir))\n if os.path.isdir(datadir):\n for f in sorted(os.listdir(datadir)):\n m = re.search('(\\w+)_(\\w+).(\\w+)$', f)\n if m:\n suffix = m.group(2).upper()\n extension = m.group(3).upper()\n\n if suffix == self._FC_ROOT_SUFFIX:\n self._root_dataset_pathname = os.path.abspath(os.path.join(datadir, f))\n self._sub_dataset_type = extension\n self._data_dir = datadir\n\n try:\n self._root_dataset = gdal.Open(self._root_dataset_pathname, self._eAccess)\n except (RuntimeError), e:\n self._root_dataset = None\n raise DSException(e.message)\n\n 
self._sub_datasets.append(self._root_dataset)\n\n if self._data_dir: # Data directory has been determined\n break # Stop searching\n\n # Perform basic checks on root dataset\n assert self._sub_dataset_type, 'Unable to determine dataset type'\n assert self._root_dataset, 'Unable to open root dataset'", "def get_datasets(h5group, prefix=''):\n for key in h5group.keys():\n h5obj = h5group[key]\n path = '{}/{}'.format(prefix, key)\n attrs = {att:val for att, val in h5obj.attrs.items()}\n\n if isinstance(h5obj, h5py.Dataset): \n \n # get metadata\n units = attrs[\"units\"] if 'units' in attrs else None\n spec = attrs[\"datatype\"] if 'datatype' in attrs else None\n \n # special handling for the nested waveform dataset\n if \"waveform/values/cumulative_length\" in path:\n nwfs = h5obj.shape[0]\n \n # must fix datatype AFTER this initial iteration\n yield (path, \"waveform\", nwfs, None, units, spec) \n elif \"waveform\" in path:\n pass\n \n # handle normal 'array<1>{real}' datasets\n else:\n yield (path, key, h5obj.shape[0], h5obj.dtype, units, spec) \n \n # test for group (go down)\n elif isinstance(h5obj, h5py.Group): \n yield from get_datasets(h5obj, path)", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def _read_groups(self):\n halodat = {}\n subhalodat = {}\n for n in range(self._num_files):\n with h5py.File(self._hdf_files[n], 'r') as f:\n self._read_group_properties(halodat, f, 'Group')\n self._read_group_properties(subhalodat, f, 'Subhalo')\n\n self._keys = list(halodat.keys())\n if self._subs is True:\n self._keys = list(subhalodat.keys())\n\n self.add_units_to_properties(halodat, subhalodat)\n return halodat, subhalodat", "def weighted_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n self._update()\n\n # let's first pick the simple version of the loadings\n df = self.simple_loadings_df(group_labels_file, subjid_pat=subjid_pat)\n blobs = get_largest_blobs(self._icc_imgs)\n\n masks = [apply_mask(ic_map, blob) for ic_map, blob in zip(self._icc_imgs, blobs)]\n\n blob_avgs = [mask.mean() for mask in masks]\n\n blob_signs = np.sign(blob_avgs)\n n_ics = len(blob_avgs)\n df[list(range(1, n_ics+1))] = df[list(range(1, n_ics+1))] * blob_signs\n return df", "def group_files(self, files):\n\n file_list = [os.path.abspath(filename) for filename in files]\n grouped_files = {}\n all_schemas = {}\n missing_files = []\n processed_files = []\n\n for filename in file_list:\n # Get the (probably versioned) filename, and save the data:\n root, _, fname = filename.rpartition(os.sep)\n\n data = DocGenUtilities.load_as_json(filename)\n\n schema_name = SchemaTraverser.find_schema_name(fname, data)\n if schema_name is None: continue\n\n normalized_uri = self.construct_uri_for_filename(filename)\n self.schema_ref_to_filename[normalized_uri] = filename\n\n # Schemas listed in 'common_object_schemas' in config should not get first-class sections.\n schema_name_parts = schema_name.split('.')\n unversioned_schema_name = schema_name_parts[0]\n has_common_object_override = False\n if self.config.get('reference_disposition', {}).get(unversioned_schema_name) == 'common_object':\n has_common_object_override = True\n if 'common_object_schemas' not in 
self.config:\n self.config['common_object_schemas'] = []\n self.config['common_object_schemas'].append(normalized_uri)\n\n data['_schema_name'] = schema_name\n all_schemas[normalized_uri] = data\n\n if filename in processed_files: continue\n\n ref = ''\n if '$ref' in data:\n ref = data['$ref'][1:] # drop initial '#'\n\n # This is a special case for resources like Redundancy, in which there is no $ref but we still need\n # to capture its versioned/unversioned data ... and then we want it to appear in the Common Properties\n # section if it's referred to.\n if not ref and has_common_object_override:\n ref = '/definitions/' + unversioned_schema_name\n if not ref:\n continue\n\n if fname.count('.') > 1:\n continue\n\n original_ref = ref\n for pathpart in ref.split('/'):\n if not pathpart: continue\n data = data[pathpart]\n\n ref_files = []\n ref_files_by_version = {}\n\n # is_versioned_schema will be True if there is an \"anyOf\" pointing to one or more versioned files.\n is_versioned_schema = False\n\n # is_collection_of will contain the type of objects in the collection.\n is_collection_of = None\n\n if 'anyOf' in data:\n\n for obj in data['anyOf']:\n if '$ref' in obj:\n refpath_uri, refpath_path = obj['$ref'].split('#')\n if refpath_path == '/definitions/idRef':\n continue\n ref_fn = refpath_uri.split('/')[-1]\n\n version_string = DocGenUtilities.get_ref_version(ref_fn)\n ref_filename = os.path.abspath(os.path.join(root, ref_fn))\n\n # If we're in subset mode, check the version and skip if > subset spec:\n subset = self.config.get('subset_resources', {}).get(schema_name, {})\n if subset.get('Version'):\n version_len = len(subset['Version'].split('.'))\n major_minor_version = '.'.join(version_string.split('.')[0:version_len])\n compare = DocGenUtilities.compare_versions(major_minor_version, subset['Version'])\n if compare > 0:\n continue\n\n\n # Skip files that are not present.\n if ref_filename in file_list:\n file_data = {'root': root,\n 'filename': ref_fn,\n 'ref': refpath_path,\n 'schema_name': schema_name,\n 'version': version_string}\n if version_string not in ref_files_by_version:\n ref_files_by_version[version_string] = [ file_data ]\n else:\n ref_files_by_version[version_string].append(file_data) # Unexpected, but roll with it.\n elif ref_filename not in missing_files:\n missing_files.append(ref_filename)\n\n else:\n # If there is anything that's not a ref, this isn't an unversioned schema.\n # It's probably a Collection. Zero out ref_files and skip the rest so we\n # can save this as a single-file group.\n if 'properties' in obj:\n if 'Members' in obj['properties']:\n # It's a collection. 
What is it a collection of?\n member_ref = obj['properties']['Members'].get('items', {}).get('$ref')\n if member_ref:\n is_collection_of = self.normalize_ref(member_ref)\n ref_files = []\n continue\n\n # Sort the ref_files by version.\n version_keys = sorted(ref_files_by_version.keys(), key=functools.cmp_to_key(DocGenUtilities.compare_versions))\n for vk in version_keys:\n for file_data in ref_files_by_version[vk]:\n ref_files.append(file_data)\n\n elif '$ref' in data:\n refpath_uri, refpath_path = data['$ref'].split('#')\n if refpath_path == '/definitions/idRef':\n continue\n\n ref_fn = refpath_uri.split('/')[-1]\n # Skip files that are not present.\n ref_filename = os.path.abspath(os.path.join(root, ref_fn))\n if ref_filename in file_list:\n ref_files.append({'root': root,\n 'filename': ref_fn,\n 'ref': refpath_path,\n 'schema_name': schema_name})\n elif ref_filename not in missing_files:\n missing_files.append(ref_filename)\n\n else:\n ref = original_ref\n\n if 'uris' in data:\n # Stash these in the unversioned schema_data.\n all_schemas[normalized_uri]['_uris'] = data['uris']\n \n if 'urisDeprecated' in data:\n # Stash the deprecated URIs as well\n all_schemas[normalized_uri]['_urisDeprecated'] = data['urisDeprecated']\n\n if len(ref_files):\n # Add the _is_versioned_schema and is_collection_of hints to each ref object\n is_versioned_schema = True\n [x.update({'_is_versioned_schema': is_versioned_schema, '_is_collection_of': is_collection_of})\n for x in ref_files]\n grouped_files[normalized_uri] = ref_files\n\n if not normalized_uri in grouped_files:\n # this is not an unversioned schema after all.\n grouped_files[normalized_uri] = [{'root': root,\n 'filename': fname,\n 'ref': ref,\n 'schema_name': schema_name,\n '_is_versioned_schema': is_versioned_schema,\n '_is_collection_of': is_collection_of}]\n\n # Note these files as processed:\n processed_files.append(filename)\n for file_refs in grouped_files[normalized_uri]:\n ref_filename = os.path.join(file_refs['root'], file_refs['filename'])\n processed_files.append(ref_filename)\n\n if len(missing_files):\n numfiles = len(missing_files)\n if numfiles <= 10:\n missing_files_list = '\\n '.join(missing_files)\n else:\n missing_files_list = '\\n '.join(missing_files[0:9]) + \"\\n \" + 'and %(number)s more.' % {'number': str(numfiles - 10)}\n warnings.warn( '%(number)s referenced files were missing: ' % {'number': str(numfiles)} + \"\\n \" + missing_files_list)\n\n return grouped_files, all_schemas" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Guess an appropriate chunk layout for an array, given its shape and the size of each element in bytes. Will allocate chunks only as large as CHUNK_MAX. Chunks are generally close to some power-of-2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning.
def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
    ndims = len(shape)
    # require chunks to have non-zero length for all dimensions
    chunks = np.maximum(np.array(shape, dtype="=f8"), 1)

    # Determine the optimal chunk size in bytes using a PyTables expression.
    # This is kept as a float.
    dset_size = np.prod(chunks) * typesize
    target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024.0 * 1024)))

    if target_size > CHUNK_MAX:
        target_size = CHUNK_MAX
    elif target_size < CHUNK_MIN:
        target_size = CHUNK_MIN

    idx = 0
    while True:
        # Repeatedly loop over the axes, dividing them by 2. Stop when:
        # 1a. We're smaller than the target chunk size, OR
        # 1b. We're within 50% of the target chunk size, AND
        # 2. The chunk is smaller than the maximum chunk size
        chunk_bytes = np.prod(chunks) * typesize

        if (
            chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5
        ) and chunk_bytes < CHUNK_MAX:
            break

        if np.prod(chunks) == 1:
            break  # Element size larger than CHUNK_MAX

        chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0)
        idx += 1

    return tuple(int(x) for x in chunks)
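A minimal sketch of a call, assuming zarr-style defaults for the CHUNK_BASE/CHUNK_MIN/CHUNK_MAX constants that the function references but which are not shown here:

CHUNK_MAX_ASSUMED = 64 * 1024 * 1024   # assumed 64 MiB cap

chunks = guess_chunks((100_000, 1_000), typesize=8)   # ~800 MB of float64 data
print(chunks)   # a power-of-2-ish fraction of each axis

nbytes = 8
for c, s in zip(chunks, (100_000, 1_000)):
    assert 1 <= c <= s          # a chunk never exceeds the array extent
    nbytes *= c
assert nbytes < CHUNK_MAX_ASSUMED   # and its byte size stays under the maximum chunk size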
[ "def expandChunk(layout, typesize, shape_json, chunk_min=CHUNK_MIN, layout_class='H5D_CHUNKED'):\n if shape_json is None or shape_json[\"class\"] == 'H5S_NULL':\n return None\n if shape_json[\"class\"] == 'H5S_SCALAR':\n return (1,) # just enough to store one item\n\n layout = list(layout)\n dims = shape_json[\"dims\"]\n rank = len(dims)\n extendable_dims = 0 # number of dimensions that are extenable\n maxdims = None\n if \"maxdims\" in shape_json:\n maxdims = shape_json[\"maxdims\"]\n for n in range(rank):\n if maxdims[n] == 0 or maxdims[n] > dims[n]:\n extendable_dims += 1\n\n dset_size = get_dset_size(shape_json, typesize)\n if dset_size <= chunk_min and extendable_dims == 0:\n # just use the entire dataspace shape as one big chunk\n return tuple(dims)\n\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size >= chunk_min:\n return tuple(layout) # good already\n while chunk_size < chunk_min:\n # just adjust along extendable dimensions first\n old_chunk_size = chunk_size\n for n in range(rank):\n dim = rank - n - 1 # start from\n\n if extendable_dims > 0:\n if maxdims[dim] == 0:\n # infinately extendable dimensions\n layout[dim] *= 2\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size > chunk_min:\n break\n elif maxdims[dim] > layout[dim]:\n # can only be extended so much\n layout[dim] *= 2\n if layout[dim] >= dims[dim]:\n layout[dim] = maxdims[dim] # trim back\n extendable_dims -= 1 # one less extenable dimension\n\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size > chunk_min:\n break\n else:\n pass # ignore non-extensible for now\n else:\n # no extendable dimensions\n if dims[dim] > layout[dim]:\n # can expand chunk along this dimension\n layout[dim] *= 2\n if layout[dim] > dims[dim]:\n layout[dim] = dims[dim] # trim back\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size > chunk_min:\n break\n else:\n pass # can't extend chunk along this dimension\n if chunk_size <= old_chunk_size:\n # reality check to see if we'll ever break out of the while loop\n log.warn(\"Unexpected error in guess_chunk size\")\n\n break\n elif chunk_size > chunk_min:\n break # we're good\n else:\n pass # do another round\n return tuple(layout)", "def shrinkChunk(layout, typesize, chunk_max=CHUNK_MAX, layout_class='H5D_CHUNKED'):\n layout = list(layout)\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size <= chunk_max:\n return tuple(layout) # good already\n rank = len(layout)\n\n while chunk_size > chunk_max:\n # just adjust along extendable dimensions first\n old_chunk_size = chunk_size\n for dim in range(rank):\n if layout[dim] > 1:\n layout[dim] //= 2\n chunk_size = getChunkSize(layout, typesize)\n if chunk_size <= chunk_max:\n break\n else:\n pass # can't shrink chunk along this dimension\n if chunk_size >= old_chunk_size:\n # reality check to see if we'll ever break out of the while loop\n log.warning(\"Unexpected error in shrink_chunk\")\n break\n elif chunk_size <= chunk_max:\n break # we're good\n else:\n pass # do another round\n return tuple(layout)", "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations 
trying to split chunk\")", "def getChunkSize(layout, type_size):\n if type_size == 'H5T_VARIABLE':\n type_size = DEFAULT_TYPE_SIZE\n\n chunk_size = type_size\n for n in layout:\n if n <= 0:\n raise ValueError(\"Invalid chunk layout\")\n chunk_size *= n\n return chunk_size", "def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:\n\n # N.B., expect shape already normalized\n\n # handle auto-chunking\n if chunks is None or chunks is True:\n return guess_chunks(shape, typesize)\n\n # handle no chunking\n if chunks is False:\n return shape\n\n # handle 1D convenience form\n if isinstance(chunks, numbers.Integral):\n chunks = tuple(int(chunks) for _ in shape)\n\n # handle bad dimensionality\n if len(chunks) > len(shape):\n raise ValueError(\"too many dimensions in chunks\")\n\n # handle underspecified chunks\n if len(chunks) < len(shape):\n # assume chunks across remaining dimensions\n chunks += shape[len(chunks) :]\n\n # handle None or -1 in chunks\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))\n\n chunks = tuple(int(c) for c in chunks)\n return chunks", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)", "def perform_chunking(self, data_size, chunk_size):\r\n\r\n chunks, i = [], 0\r\n while True:\r\n chunks.append((i * (chunk_size - self.overlap / 2), i * (chunk_size - self.overlap / 2) + chunk_size))\r\n i += 1\r\n if chunks[-1][1] > data_size:\r\n break\r\n\r\n n_count = len(chunks)\r\n chunks[-1] = tuple(x - (n_count * chunk_size - data_size - (n_count - 1) * self.overlap / 2) for x in chunks[-1])\r\n chunks = [(int(x), int(y)) for x, y in chunks]\r\n return chunks", "def get_chunks(num_items, num_steps):\n chunk_sizes = np.zeros(num_steps, dtype=int)\n chunk_sizes[:] = num_items // num_steps\n chunk_sizes[:num_items % num_steps] += 1\n\n chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)\n chunk_offsets[0] = 0\n return chunk_sizes, chunk_offsets", "def perform_chunking(self, img_size, chunk_size):\n chunks, i = [], 0\n while True:\n chunks.append((i*(chunk_size - self.overlap/2), i*(chunk_size - self.overlap/2)+chunk_size))\n i+=1\n if chunks[-1][1] > img_size:\n break\n n_count = len(chunks) \n chunks[-1] = tuple(x - (n_count*chunk_size - img_size - (n_count-1)*self.overlap/2) for x in chunks[-1])\n chunks = [(int(x), int(y)) for x, y in chunks]\n return chunks", "def get_chunks(data, chunks=None):\n if chunks is None:\n\n if hasattr(data, 'chunklen') and hasattr(data, 'shape'):\n # bcolz carray, chunk first dimension only\n return (data.chunklen,) + data.shape[1:]\n\n elif hasattr(data, 'chunks') and 
hasattr(data, 'shape') and \\\n len(data.chunks) == len(data.shape):\n # h5py dataset\n return data.chunks\n\n else:\n # fall back to something simple, ~1Mb chunks of first dimension\n row = np.asarray(data[0])\n chunklen = max(1, (2**20) // row.nbytes)\n if row.shape:\n chunks = (chunklen,) + row.shape\n else:\n chunks = (chunklen,)\n return chunks\n\n else:\n\n return chunks", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in zip(grid_index, chunk_size, shape))", "def get_load_strategy(\n buffer_mem_size,\n cs,\n original_array_blocks_shape,\n nb=4):\n \n block_mem_size = cs[0] * cs[1] * cs[2] * nb\n strategy = \"blocks\" # for the moment, let the strategy be blocks only\n \n if (buffer_mem_size < block_mem_size):\n msg = \"Not enough memory to store one block!\"\n print(msg)\n raise ValueError(msg)\n max_blocks_per_load = math.floor(buffer_mem_size / block_mem_size)\n\n logging.debug(f'Memory available: {buffer_mem_size}')\n logging.debug(f'Chunk_shape: {cs}')\n logging.debug(f'Block_mem_size: {block_mem_size}')\n logging.debug(f'Strategy: {strategy}')\n logging.debug(f'Max nb blocks per load: {max_blocks_per_load}')\n return strategy, max_blocks_per_load", "def get_chunks(data, chunks=None):\n\n if chunks is None:\n\n if hasattr(data, 'chunks') and hasattr(data, 'shape') and \\\n len(data.chunks) == len(data.shape):\n # h5py dataset or zarr array\n return data.chunks\n\n else:\n # fall back to something simple, ~4Mb chunks of first dimension\n row = np.asarray(data[0])\n chunklen = max(1, (2**22) // row.nbytes)\n if row.shape:\n chunks = (chunklen,) + row.shape\n else:\n chunks = (chunklen,)\n return chunks\n\n else:\n\n return chunks", "def get_n_chunks(self, chunksize: int, batch_size: int):\n return int(math.ceil(self._grid_raysampler._xy_grid.numel() * 0.5 * batch_size / chunksize))", "def split_array(array: np.ndarray, parts: int):\n\n if parts == -1:\n parts = array.size\n shape = array.shape\n possible_chunk_sizes = []\n # Generate all possible chunk sizes for the given array shape\n for chunk_size in product(*[range(1, shape[i] + 1) for i in range(len(shape))]):\n # Check if the number of chunks generated by the current chunk size is equal to the desired number of parts\n if np.prod(\n [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]) == parts:\n possible_chunk_sizes.append(chunk_size)\n # Sort the possible chunk sizes in ascending order of the sum of the squares of their dimensions\n possible_chunk_sizes.sort(key=lambda x: np.sum(np.array(x) ** 2)) # type: ignore\n if not possible_chunk_sizes:\n logging.warning(\"Could not divide the domain in %d parts. 
Trying with parts=%d.\", parts, parts - 1)\n return split_array(array=array, parts=parts - 1)\n selected_chunk_size = possible_chunk_sizes[0]\n\n chunks = []\n # Get the number of chunks for the first possible chunk size\n num_chunks = [shape[i] // selected_chunk_size[i] + int(shape[i] % selected_chunk_size[i] != 0) for i in\n range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n # Iterate over the chunks and append the corresponding slice of the array to the chunks list\n for indx in product(*indexes):\n current_slice = tuple(\n slice(selected_chunk_size[i] * indx[i], min(selected_chunk_size[i] * (indx[i] + 1), shape[i])) for i in\n range(len(shape)))\n chunks.append(array[current_slice])\n return chunks", "def look_for_biggest_structure(game, chunk, imgs, hmap, nmax, type_):\n for n in range(nmax,0,-1):\n i = 0\n m = parameters.MAX_VILLAGE_WIDTH * n / parameters.MAX_VILLAGE_SIZE\n while i < parameters.VILLAGE_TRY:\n chunkpos = np.random.randint(0,parameters.S,2)\n cx,cy = chunkpos\n h = np.sum(hmap[cx:cx+m,cy:cy+m]) / (m*m)\n if h > parameters.VILLAGE_LEVEL:\n force_build_structure(game, imgs, chunk, chunkpos, n, type_)\n return n\n i += 1\n return 0", "def splitMaxSized(l, batchMaxSize):\n batchCount = 1\n if batchMaxSize is not None and batchMaxSize > 0:\n batchCount = math.ceil(len(l) / batchMaxSize)\n return split(l, batchCount)", "def _scale_shape_to_size(size_mb, shape, size, max_shape):\n k = np.floor((size_mb / size) ** (1 / len(shape)))\n return tuple([min(max(int(x), shape[j]), max_shape[j]) for j, x in enumerate(k * np.array(shape))])", "def reshape_as_blocks(data, block_size):\n data, block_size = _process_block_inputs(data, block_size)\n\n if np.any(np.mod(data.shape, block_size) != 0):\n raise ValueError(\n \"Each dimension of block_size must divide evenly \"\n \"into the corresponding dimension of data\"\n )\n\n nblocks = np.array(data.shape) // block_size\n new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)\n nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices\n block_idx = tuple(range(1, len(new_shape), 2)) # odd indices\n\n return data.reshape(new_shape).transpose(nblocks_idx + block_idx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to normalize the `chunks` argument for an array with the given `shape`.
def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
    # N.B., expect shape already normalized

    # handle auto-chunking
    if chunks is None or chunks is True:
        return guess_chunks(shape, typesize)

    # handle no chunking
    if chunks is False:
        return shape

    # handle 1D convenience form
    if isinstance(chunks, numbers.Integral):
        chunks = tuple(int(chunks) for _ in shape)

    # handle bad dimensionality
    if len(chunks) > len(shape):
        raise ValueError("too many dimensions in chunks")

    # handle underspecified chunks
    if len(chunks) < len(shape):
        # assume chunks across remaining dimensions
        chunks += shape[len(chunks):]

    # handle None or -1 in chunks
    if -1 in chunks or None in chunks:
        chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))

    chunks = tuple(int(c) for c in chunks)
    return chunks
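A few concrete cases that follow directly from the branches above:

assert normalize_chunks(500, (10_000, 1_000), 8) == (500, 500)          # int: same size on every axis
assert normalize_chunks((100,), (10_000, 1_000), 8) == (100, 1_000)     # missing dims filled from shape
assert normalize_chunks((100, -1), (10_000, 1_000), 8) == (100, 1_000)  # -1/None means "whole axis"
assert normalize_chunks(False, (10_000, 1_000), 8) == (10_000, 1_000)   # False disables chunking
# chunks=None or True defers to guess_chunks(shape, typesize)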
[ "def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s", "def normalize(arr, axis=1):\n _arr = arr if type(arr) == np.ndarray else np.array(arr)\n if len(_arr.shape) == 1:\n return normalize([arr])[0]\n else:\n return _arr / _arr.sum(axis=axis)[:, np.newaxis]", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalize_chunks(\n chunks: Mapping[str, Union[int, Tuple[int, ...]]],\n dim_sizes: Mapping[str, int],\n) -> Dict[str, int]:\n if not chunks.keys() <= dim_sizes.keys():\n raise ValueError(\n 'all dimensions used in chunks must also have an indicated size: '\n f'chunks={chunks} vs dim_sizes={dim_sizes}')\n result = {}\n for dim, size in dim_sizes.items():\n if dim not in chunks:\n result[dim] = size\n elif isinstance(chunks[dim], tuple):\n unique_chunks = set(chunks[dim])\n if len(unique_chunks) != 1:\n raise ValueError(\n f'chunks for dimension {dim} are not constant: {unique_chunks}',\n )\n result[dim], = unique_chunks\n elif chunks[dim] == -1:\n result[dim] = size\n else:\n result[dim] = chunks[dim]\n return result", "def normalize(arr):\n norm_arr = (arr - np.mean(arr)) / np.std(arr)\n return norm_arr", "def _normalize_array(array, mean, std):\n if isinstance(array, torch.Tensor):\n dev = array.device\n std = torch.tensor(std, device=dev)\n mean = torch.tensor(mean, device=dev)\n return (array - mean) / std", "def array_rebin(data, shape):\n\n # Ensure dimensions are consistent\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n\n # Get pairs of (shape, bin factor) for each dimension\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n\n # Rebin the array\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data", "def _fit_array_to_image(base_shape, array: np.ndarray) -> np.ndarray:\n shape = list(array.shape)\n for i, el in enumerate(base_shape):\n if el == 1 and el != shape[i]:\n shape.insert(i, 1)\n elif el != shape[i]:\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n if len(shape) != len(base_shape):\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n return np.reshape(array, shape)", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def _normalize_sequence(arr, rank):\n if hasattr(arr, \"__iter__\") and not isinstance(arr, str):\n if isinstance(arr, cupy.ndarray):\n arr = cupy.asnumpy(arr)\n normalized = list(arr)\n if len(normalized) != rank:\n err = \"sequence argument must have length equal to arr rank\"\n raise RuntimeError(err)\n else:\n normalized = [arr] * rank\n return normalized", "def reshape(arr, shape):\n if -1 in shape:\n # e.g., NUMERIX.reshape(a, (-1, N)) fails if N == 0\n oldShape = array(getShape(arr))\n oldShape[oldShape == 0] = 1\n\n if hasattr(shape, 'index'):\n index = shape.index(-1)\n else:\n index = list(shape).index(-1)\n\n left = shape[:index]\n right = 
shape[index+1:]\n newShape = array(left + right)\n newShape[newShape == 0] = 1\n\n shape = left + (oldShape.prod() // newShape.prod(),) + right\n\n if _isPhysical(arr):\n return arr.reshape(shape)\n elif isinstance(arr, type(array((0)))):\n return NUMERIX.reshape(arr, tuple(shape))\n elif isinstance(arr, type(MA.array((0)))):\n return MA.reshape(arr, shape)\n else:\n return NUMERIX.reshape(array(arr), tuple(shape))", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(array):\n\n mean = np.mean(array, axis=0)\n std = np.std(array, axis=0)\n\n norm_array = (array - mean) / std\n\n return norm_array", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def __normalize_after_fft(arr):\n\n n1, n2 = arr.shape[0], arr.shape[1]\n for i in range(n1):\n for j in range(n2):\n arr[i, j] *= n1 * n2\n\n return arr", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def reshape(x, shape):\n return Reshape(shape)(x)", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether `item` specifies a complete slice of an array with the given `shape`. Used to optimize __setitem__ operations on the Chunk class.
def is_total_slice(item, shape: Tuple[int]) -> bool:
    # N.B., assume shape is normalized
    if item == Ellipsis:
        return True
    if item == slice(None):
        return True
    if isinstance(item, slice):
        item = (item,)
    if isinstance(item, tuple):
        return all(
            (
                isinstance(it, slice)
                and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None])))
            )
            for it, sh in zip(item, shape)
        )
    else:
        raise TypeError("expected slice or tuple of slices, found %r" % item)
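A few cases illustrating the intended semantics on a (100, 50) array:

assert is_total_slice(Ellipsis, (100, 50))
assert is_total_slice(slice(None), (100, 50))
assert is_total_slice((slice(None), slice(0, 50)), (100, 50))       # covers both axes exactly
assert not is_total_slice((slice(0, 10), slice(None)), (100, 50))   # partial on the first axis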
[ "def _is_total_slice(item, shape):\n\n # N.B., assume shape is normalized\n\n if item == Ellipsis:\n return True\n if item == slice(None):\n return True\n if isinstance(item, slice):\n item = item,\n if isinstance(item, tuple):\n return all(\n (isinstance(s, slice) and\n ((s == slice(None)) or\n ((s.stop - s.start == l) and (s.step in [1, None]))))\n for s, l in zip(item, shape)\n )\n else:\n raise TypeError(\"expected slice or tuple of slices, found %r\" % item)", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def point_in_shape(shape, i, j):\n if shape.type == 0:\n if (i >= shape.x) and (i <= shape.width + shape.x):\n if (j >= shape.y) and (j <= shape.height + shape.y):\n return True\n else:\n length = math.sqrt(abs(shape.x - i)**2 + abs(shape.y - j)**2)\n if length <= (shape.width//2):\n return True\n return False", "def isSlice(self):\r\n return self._wrap(type(self.obj) is type(slice))", "def __contains__(self, item: 'BoundingBox2D') -> bool:\n top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside", "def _is_dim_removed_by_splitting(cls, graph: NNCFGraph, node: NNCFNode) -> Optional[int]:\n split_axis = None\n if isinstance(node.layer_attributes, GetItemLayerAttributes):\n input_edge = graph.get_input_edges(node)[0]\n input_shape = input_edge.tensor_shape\n parent_node = input_edge.from_node\n child_nodes = graph.get_next_nodes(parent_node)\n child_attributes = [cnode.layer_attributes for cnode in child_nodes]\n all_getitem = all(isinstance(ca, GetItemLayerAttributes) for ca in child_attributes)\n assert all_getitem, \"currently supported only case with all __getitem__ on branches\"\n all_int_keys = all(isinstance(ca.key, int) for ca in child_attributes)\n # currently supported only case __getitem__ with single int, no slices\n if not all_int_keys:\n return None\n all_keys = set(ca.key for ca in child_attributes)\n split_dim = input_shape[0]\n if all_keys == set(range(split_dim)):\n split_axis = 0\n return split_axis", "def _is_simple_slice(key):\n return isinstance(key, tuple) and len(key) > 0 and isinstance(key[0], slice) and np.all([ (k is slice and k == np.s_[:]) or k is Ellipsis for k in key[1:]])", "def is_slice(self) -> bool:\n return self._is_slice", "def has_shape(a):\n try:\n a.shape\n return True\n except AttributeError:\n return False", "def has_shape(node):\n allowed_shapes = (\n pm.nt.Mesh,\n pm.nt.NurbsCurve,\n pm.nt.NurbsSurface\n )\n\n has_it = False\n\n children = node.getChildren()\n while len(children) and not has_it:\n child = children.pop(0)\n if isinstance(child, allowed_shapes):\n has_it = True\n break\n children += child.getChildren()\n\n return has_it", "def IsVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def isItem(self, pos):\n (l, c) = pos\n return self.tab[l][c] < -1 or pos in 
self.items", "def isItemPickable(self, item):\n return self._condition is None or self._condition(item)", "def owns_shape(self, pm_shape):\n all_shapes = [part.pm_visible_shape for part in self.parts]\n if pm_shape in all_shapes:\n return True\n return False", "def test_user_item(self):\n returnshape = testsparse_item_user.get_shape()\n testshape = ((testdf['item_id'].nunique()),\n (testdf['user_id'].nunique()))\n self.assertEqual(testshape, returnshape)", "def has_shape(self):\n if self.shape is None: return False\n else:\n return True", "def check_data_shape(x, shape=()):\n x = np.array(x)\n\n idx = x.ndim - len(shape)\n if x.shape[idx:] == shape:\n set_shape = x.shape[:idx]\n else:\n raise TypeError(f\"Trailing dimensions of 'x.shape' must be equal to {shape}.\")\n\n return x, set_shape", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def test_item_user(self):\n returnshape = testsparse_user_item.get_shape()\n testshape = ((testdf['user_id'].nunique()),\n (testdf['item_id'].nunique()))\n self.assertEqual(testshape, returnshape)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if all the elements of an array are equivalent to a value. If `value` is None, then this function does not do any comparison and returns False.
def all_equal(value: Any, array: Any):
    if value is None:
        return False
    if not value:
        # if `value` is falsey, then just 1 truthy value in `array`
        # is sufficient to return False. We assume here that np.any is
        # optimized to return on the first truthy value in `array`.
        try:
            return not np.any(array)
        except (TypeError, ValueError):  # pragma: no cover
            pass
    if np.issubdtype(array.dtype, np.object_):
        # we have to flatten the result of np.equal to handle outputs like
        # [np.array([True,True]), True, True]
        return all(flatten(np.equal(value, array, dtype=array.dtype)))
    else:
        # Numpy errors if you call np.isnan on custom dtypes, so ensure
        # we are working with floats before calling isnan
        if np.issubdtype(array.dtype, np.floating) and np.isnan(value):
            return np.all(np.isnan(array))
        else:
            # using == raises warnings from numpy deprecated pattern, but
            # using np.equal() raises type errors for structured dtypes...
            return np.all(value == array)
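A short illustration (numpy only; the module's flatten() helper is needed only for object dtypes, which these cases avoid):

import numpy as np

assert all_equal(0, np.zeros(10, dtype='i4'))          # falsey value: reduces to "no truthy element"
assert all_equal(1.5, np.full(4, 1.5))                 # plain equality for numeric dtypes
assert all_equal(np.nan, np.array([np.nan, np.nan]))   # NaN handled via isnan, not ==
assert not all_equal(None, np.zeros(3))                # None never matches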
[ "def all_equal(array):\n if not array:\n raise ValueError(\"Array is empty\")\n\n first_item = array[0]\n\n if any(item != first_item for item in array):\n return False\n\n return True", "def has_equal_values_vec(x):\n return jnp.all(x == x[0])", "def check_array(self, array: ArrayData, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n assert len(value) <= len(array), \"Array to compare against must contain a smaller or equal amount of elements.\"\n expected = self.array(value).name\n actual = \"la a2, \" + self._lookup_array(array)\n self._compare_int_array(array.name, actual, expected, value, exit_code = 2)", "def all_array_equal(array_list: List[np.ndarray]) -> bool:\n return all([np.allclose(array_list[0], a) for a in array_list[1:]])", "def same_value(*values):\r\n same = all(val1 == val2 for val1, val2 in pairwise(values))\r\n # Some types are not quite as simple.\r\n # Date-times & floats can have slight variations even when essentially the same.\r\n if all(isinstance(val, datetime.datetime) for val in values):\r\n same = all(\r\n abs(val1 - val2).total_seconds() < 10 ** -64\r\n for val1, val2 in pairwise(values)\r\n )\r\n if all(isinstance(val, float) for val in values):\r\n same = all(math.isclose(val1, val2) for val1, val2 in pairwise(values))\r\n # Geometry equality has extra considerations.\r\n elif all(isinstance(val, (arcpy.Geometry, arcpy.Point)) for val in values):\r\n same = all(val1.equals(val2) for val1, val2 in pairwise(values))\r\n return same", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def is_allval(self):\n\n tups = self.tuples\n\n return tups[-1][1] == 0 and tups[0][1] != 0", "def is_none_or_nan_array(x: Union[Number, np.ndarray, None]) -> bool:\n return x is None or np.isnan(x).all()", "def array_equals_nodata(array, nodata):\n # If nodata is undefined, nothing matches nodata.\n if nodata is None:\n return numpy.zeros(array.shape, dtype=bool)\n\n # comparing an integer array against numpy.nan works correctly and is\n # faster than using numpy.isclose().\n if numpy.issubdtype(array.dtype, numpy.integer):\n return array == nodata\n return numpy.isclose(array, nodata, equal_nan=True)", "def _return_if_equal_operation(arrays: np.ndarray) -> bool:\n if _all_equal(arrays):\n return arrays[0]\n raise ValueError(\"Cannot merge given arrays because their values are not the same.\")", "def check_all_iterable_values_equal(iterable):\n return all(second_value_onwards == iterable[0] for second_value_onwards in iterable[1:])", "def _has_at_least_one_value(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] == v:\n return True\n return False", "def test_if_unique(array):\n\t\n\treturn np.unique(array).size == array.size", "def equals(self, other):\n if hasattr(other, 'values'):\n other = other.values\n if not hasattr(other, 'shape'):\n other = np.array(other)\n return np.all(self.values == other)", "def __is_all_none(self, v):\n if v is None:\n return True\n if type(v) == list:\n return all(map(lambda e: self.__is_all_none(e), v))\n return False", "def is_array(value):\n from numpy import ndarray\n is_array = 
isinstance(value, ndarray)\n return is_array", "def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True", "def array_equal_to(obj):\n return ArrayIsEqual(obj)", "def vany(*args: A) -> bool:\n return any(bool(a) for a in args) if args else False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to coerce `buf` to an ndarray-like array or bytes. First check if `buf` can be zero-copy converted to a contiguous array. If not, `buf` will be copied to a newly allocated `bytes` object.
def ensure_contiguous_ndarray_or_bytes(buf) -> Union[NDArrayLike, bytes]:
    try:
        return ensure_contiguous_ndarray_like(buf)
    except TypeError:
        # An error is raised if `buf` couldn't be zero-copy converted
        return ensure_bytes(buf)
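A minimal sketch, assuming the numcodecs-style helpers ensure_contiguous_ndarray_like and ensure_bytes are importable alongside the function:

import numpy as np

arr = np.arange(16, dtype='u1')
out = ensure_contiguous_ndarray_or_bytes(arr)
assert out.nbytes == arr.nbytes   # a contiguous array is passed through zero-copy
# Anything that cannot be viewed as a contiguous buffer falls back to ensure_bytes(),
# so callers always receive either an ndarray-like view or a bytes object.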
[ "def buffer_to_bytes(buf):\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf", "def _byte_buffer(cls, data: np.ndarray) -> np.ndarray:\n view = data.view()\n view.shape = (data.size,) # Reshape while disallowing copy\n return view.view(np.uint8)", "def test_array_as_buffer(parser):\n doc = parser.parse(b'''{\n \"d\": [1.2, 2.3, 3.4],\n \"i\": [-1, 2, -3, 4],\n \"u\": [1, 2, 3, 4, 5],\n \"x\": [1, 2, 3, \"not valid\"]\n }''')\n\n memoryview(doc['d'].as_buffer(of_type='d'))\n memoryview(doc['i'].as_buffer(of_type='i'))\n memoryview(doc['u'].as_buffer(of_type='u'))\n\n # Not a valid `of_type`.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='x')\n\n # Not a valid homogeneous array.\n with pytest.raises(TypeError):\n doc['x'].as_buffer(of_type='u')\n\n # Signed elements should error on cast.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='u')", "def ensure_memoryview(obj: bytes | bytearray | memoryview | PickleBuffer) -> memoryview:\n if not isinstance(obj, memoryview):\n obj = memoryview(obj)\n\n if not obj.nbytes:\n # Drop `obj` reference to permit freeing underlying data\n return memoryview(bytearray())\n elif not obj.contiguous:\n # Copy to contiguous form of expected shape & type\n return memoryview(bytearray(obj))\n elif obj.ndim != 1 or obj.format != \"B\":\n # Perform zero-copy reshape & cast\n # Use `PickleBuffer.raw()` as `memoryview.cast()` fails with F-order\n # xref: https://github.com/python/cpython/issues/91484\n return PickleBuffer(obj).raw()\n else:\n # Return `memoryview` as it already meets requirements\n return obj", "def buffer_array(arr, buffer=0):\n if not isinstance(arr, np.ndarray):\n raise TypeError(\"not a NumPy array\")\n elif arr.ndim != 2:\n raise TypeError(\"array not 2-dimensional\")\n elif arr.dtype != bool:\n arr = arr.astype(bool)\n\n if buffer == 0 or not arr.any():\n return arr.astype(bool, copy=False)\n else:\n return rasterio.features.geometry_mask(\n (\n shape(p).buffer(buffer)\n for p, v in\n rasterio.features.shapes(arr.astype(np.uint8, copy=False), mask=arr)\n if v\n ),\n arr.shape,\n Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0),\n invert=True\n ).astype(bool, copy=False)", "def _cast_uint8_ndarray(a):\n\n if isinstance(a, numpy.ndarray):\n if a.dtype == 'uint8':\n return a\n else:\n return numpy.array(a, dtype=numpy.uint8)\n elif isinstance(a, bytes):\n return numpy.array(bytearray(a), dtype=numpy.uint8)\n else:\n return numpy.array(a, dtype=numpy.uint8)", "def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)", "def _coerce_to_np_array(\n data: list | np.ndarray | pd.Series,\n output_type: dt.Struct,\n index: pd.Index | None = None,\n) -> np.ndarray:\n return np.array(data)", "def unpack(self, buf):\n self.array = np.frombuffer(buf, dtype=self.dtype)[0]", "def as_ndarray(arr, copy=False, dtype=None, order='K'):\n # This function should work on numpy 1.3\n # in this version, astype() and copy() have no \"order\" keyword.\n # and asarray() does not accept the \"K\" and \"A\" values for order.\n\n # numpy.asarray never copies a subclass of numpy.ndarray (even for\n # memmaps) when dtype is unchanged.\n # .astype() always copies\n\n if order not in (\"C\", \"F\", \"A\", \"K\", None):\n raise ValueError(\"Invalid value for 'order': %s\" % str(order))\n\n if isinstance(arr, np.memmap):\n if dtype is None:\n if order in (\"K\", \"A\", None):\n ret = np.array(np.asarray(arr), copy=True)\n else:\n ret = np.array(np.asarray(arr), copy=True, order=order)\n else:\n if order in 
(\"K\", \"A\", None):\n # always copy (even when dtype does not change)\n ret = np.asarray(arr).astype(dtype)\n else:\n # First load data from disk without changing order\n # Changing order while reading through a memmap is incredibly\n # inefficient.\n ret = np.array(arr, copy=True)\n ret = _asarray(arr, dtype=dtype, order=order)\n\n elif isinstance(arr, np.ndarray):\n ret = _asarray(arr, dtype=dtype, order=order)\n # In the present cas, np.may_share_memory result is always reliable.\n if np.may_share_memory(ret, arr) and copy:\n # order-preserving copy\n if ret.flags[\"F_CONTIGUOUS\"]:\n ret = ret.T.copy().T\n else:\n ret = ret.copy()\n\n elif isinstance(arr, (list, tuple)):\n if order in (\"A\", \"K\"):\n ret = np.asarray(arr, dtype=dtype)\n else:\n ret = np.asarray(arr, dtype=dtype, order=order)\n\n else:\n raise ValueError(\"Type not handled: %s\" % arr.__class__)\n\n return ret", "def frombuffer(buffer, **kwargs):\n\n return call_origin(numpy.frombuffer, buffer, **kwargs)", "def coerce_array(data, dtype=None, force_integer=False, force_unsigned=False, force_float=False, force_gl=True):\n\n if dtype is None:\n if hasattr(data, \"dtype\"):\n dtype = Datatype.from_numpy(data.dtype)\n else:\n if force_integer:\n dtype = int32\n else:\n dtype = float32\n dtype = dtype.coerced(force_integer, force_unsigned, force_float, force_gl)\n\n return _np.ascontiguousarray(data, dtype.as_numpy())", "def coerce(a, dtype):\n if dtype is None or a.dtype == dtype: return a\n b = np.empty(a.shape, dtype = dtype)\n b[:] = a\n return b", "def ensure_array(obj):\n if is_array(obj):\n return obj\n else:\n return [obj]", "def to_device(self, arr, destbuf=None, flags=None):\n\n clflags = self.oclflags(self.mf.COPY_HOST_PTR, flags)\n arr_c = Ocl.check_array(arr)\n if destbuf is None:\n d_id = cl.Buffer(self.ctx, clflags, hostbuf=arr_c)\n else:\n if destbuf.size != arr_c.nbytes: # TODO : only throw error for destbuf.size < arr.nbytes ? 
But what would be the behavior ?\n raise ValueError(\"ERROR: to_device(): requested to transfer an array of %d bytes, when the device buffer is %d bytes\" % (arr.nbytes, destbuf.size))\n try:\n ev = cl.enqueue_copy(self.queue, destbuf, arr_c)\n # check ev against cl.command_execution_status.{COMPLETE, SUBMITTED, RUNNING, QUEUED}\n d_id = destbuf\n except cl.LogicError:\n raise RuntimeError(\"ERROR: to_device(): failed to transfer array of shape %s (dtype=%s)\" % (str(arr.shape), str(arr.dtype)))\n self.book_keep(d_id, arr_c.shape, arr_c.dtype)\n return d_id", "def _read_to_buffer(cls, buf, stream):\n # We could read it in one step, but instead we'll read it in chunks to avoid big temporaries.\n # (See below.)\n # buf[:] = stream.read( len(buf) )\n\n # Read data from the stream in chunks\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n buf[chunk_start:chunk_stop] = stream.read( next_chunk_bytes )\n remaining_bytes -= next_chunk_bytes", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def get_buf(self, data_type = \"void\"):\n if self.buf is not None:\n return ffi.cast(data_type + \"*\", self.buf)\n else:\n raise RuntimeError(\"Buffer not created.\")", "def _validate_bytes(datum, **kwargs):\n return isinstance(datum, (bytes, bytearray))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read and preprocess an image with data augmentation (random transform).
def read_for_training(p, augmentation=False):
    img = imread(TRAIN + p, mode='RGB')
    msk = img
    if mode == 'background':
        data = {'image': img}
    elif mode == 'instance' or mode == 'code':
        msk = imread(TRAIN_MASK + p.replace('.jpg', '.png'))
        data = {'image': img, 'mask': msk}
    if augmentation:
        data_aug = strong_aug()(**data)
        img = data_aug['image']
        if 'mask' in data_aug:
            msk = data_aug['mask']
    if mode == 'instance' or mode == 'code':
        img[~msk.astype(np.bool)] = 0
    img, msk = size_normalization(img, msk)
    if mode == 'code':
        img = encode(img, msk)
    return img, msk
[ "def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_", "def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)", "def load_image(path, preprocess=True):\n #x = image.load_img(path, target_size=(H, W))\n x=path\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n #x = preprocess_input(x)\n return x", "def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def preprocess_example_input(input_config):\n\n input_path = input_config[\"input_path\"]\n input_shape = input_config[\"input_shape\"]\n one_img = imread(input_path)\n if \"normalize_cfg\" in input_config.keys():\n normalize_cfg = input_config[\"normalize_cfg\"]\n mean = np.array(normalize_cfg[\"mean\"], dtype=np.float32)\n std = np.array(normalize_cfg[\"std\"], dtype=np.float32)\n one_img = imnormalize(one_img, mean, std)\n one_img = imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(True)\n (_, C, H, W) = input_shape\n one_meta = {\n \"img_shape\": (H, W, C),\n \"ori_shape\": (H, W, C),\n \"pad_shape\": (H, W, C),\n \"filename\": \"<demo>.png\",\n \"scale_factor\": 1.0,\n \"flip\": False,\n }\n\n return one_img, one_meta", "def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def preprocess_example_input(input_config):\n input_path = input_config['input_path']\n input_shape = input_config['input_shape']\n one_img = mmcv.imread(input_path)\n one_img = mmcv.imresize(one_img, input_shape[2:][::-1])\n show_img = one_img.copy()\n if 'normalize_cfg' in input_config.keys():\n normalize_cfg = input_config['normalize_cfg']\n mean = np.array(normalize_cfg['mean'], dtype=np.float32)\n std = np.array(normalize_cfg['std'], dtype=np.float32)\n to_rgb = normalize_cfg.get('to_rgb', True)\n one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)\n one_img = one_img.transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(\n True)\n (_, C, H, W) = input_shape\n one_meta = {\n 'img_shape': (H, W, C),\n 'ori_shape': (H, W, C),\n 'pad_shape': (H, W, C),\n 'filename': '<demo>.png',\n 'scale_factor': np.ones(4),\n 'flip': False,\n 'show_img': show_img,\n }\n\n return one_img, one_meta", "def load_and_preprocess(upload_file, W, H, mean, std):\n \n if upload_file is not None:\n up_image = Image.open(upload_file) \n \n im_array = np.array(up_image)\n \n # Show the original image in the side-bar\n fig = plt.figure()\n plt.axis('off')\n plt.imshow(im_array, cmap=\"bone\")\n st.sidebar.pyplot(fig)\n \n # Resize, normalize, 3-channel and expand the dimensions of the image\n target_size=(H, W)\n x = up_image.resize(target_size)\n x -= mean\n x /= std\n \n y = np.stack((x,x,x), axis=2)\n y = np.expand_dims(y, axis=0)\n \n # x is the normalized image\n # y is the tensor for the model predictor\n return x, y", "def pre_process(img):\n # resize image to 512x512\n im_adjusted = resize(img, (img_dims,img_dims), anti_aliasing=True,\\\n preserve_range=True)\n\n # ensure image is grayscale (only has 1 channel)\n im_adjusted = im_adjusted.astype(np.float32)\n if len(im_adjusted.shape) >= 3:\n # squash 3 channel image to grayscale\n 
im_adjusted = np.dot(im_adjusted[...,:3], [0.299, 0.587, 0.114])\n \n # normalize the image to a 0-1 range\n if not np.amax(im_adjusted) < 1: # check that image isn't already normalized\n if np.amin(im_adjusted) < 0:\n im_adjusted += np.amin(im_adjusted)\n im_adjusted /= np.amax(im_adjusted)\n \n # model requires 4D input; shape it to expected dims\n im_adjusted = np.reshape(im_adjusted, (1, img_dims, img_dims, 1))\n return im_adjusted", "def preprocess(self, img):\n return img - np.mean(img)", "def load_and_preprocess_image(path, max_dim=512):\n f = tf.io.read_file(path)\n img = tf.io.decode_image(f)\n img = resize_min(img, max_dim)\n img = tf.expand_dims(img, axis=0)\n img = vgg_preprocess_input(img)\n return img", "def preprocess_image(image, model_image_size):\n resized_image = image.resize(model_image_size, Image.BICUBIC)\n image_data = np.asarray(resized_image).astype('float32')\n #image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0)\n return image_data", "def apply_preprocess(self, img, batch_size=10):\n self._setup()\n\n # Loop to save RAM\n index = np.arange(img.shape[0])\n new_img = []\n for index in range(0, img.shape[0], batch_size):\n batch = img[index:min(index + batch_size, img.shape[0]), ::]\n processed = self._pre_process.augment_images(batch)\n new_img.append(processed)\n\n return np.concatenate(new_img, axis=0)", "def preprocess_image(filename):\n\n try: \n print(\"Read \" + filename)\n image_string = tf.io.read_file(filename)\n except: \n print(\"The image with string: \" + str(filename) + \"could not be loaded\")\n image_string = tf.io.read_file(images_path + \"00000.jpg\")\n \n image = tf.image.decode_jpeg(image_string, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.resize(image, target_shape)\n return image", "def augment_data(self, img):\n augmenter = torchvision.transforms.Compose([torchvision.transforms.RandomResizedCrop((img.shape[2],\n img.shape[2])),\n torchvision.transforms.ColorJitter(0.9, 0.9, 0.9, 0.5),\n torchvision.transforms.GaussianBlur(5, sigma=(1.5, 3.5))])\n augmented_1 = augmenter(img)\n augmented_2 = augmenter(img)\n # self.visualize_image(augmented_1)\n # self.visualize_image(augmented_2)\n return augmented_1, augmented_2", "def preprocess(img):\n return remap_color(rescale(img))", "def preprocess(img_name, quality=None):\n img = plt.imread(img_name)\n Y, X, C = img.shape\n img = img[:Y-Y%2, :X-X%2, :3]\n if quality is not None:\n img = jpeg_compress(img, quality)\n img = img_to_tensor(img).cuda().type(torch.float)\n return img", "def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n\n return image * self.rescale", "def preprocess_images(self, images):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Amount by which x must be increased so that it is evenly divisible by 64
def padlen_64(x: int):
    return (64 - (x % 64)) % 64
[ "def bulk_modulus():\n\n return 10000.0", "def mod_5(x):\r\n return x%5", "def foo4(x):\n\tresult = 1\n\tfor i in range(1, x +1):\n\t\tresult = result * i\n\treturn result", "def mod_5(x):\n return x % 5", "def enlarge(n):\n\n return n* 100", "def euler52():\n\tx = 0\n\twhile True:\n\t\tx += 1\n\t\tif sorted(str(x)) != sorted(str(2*x)): continue\n\t\tif sorted(str(x)) != sorted(str(3*x)): continue\n\t\tif sorted(str(x)) != sorted(str(4*x)): continue\n\t\tif sorted(str(x)) != sorted(str(5*x)): continue\n\t\tif sorted(str(x)) != sorted(str(6*x)): continue\n\t\treturn x", "def enlarge(n):\n return n * 100", "def _roundup(self, x):\n\n if x < 100:\n fact = 10\n elif x < 1000:\n fact = 100\n elif x < 10000:\n fact = 1000\n elif x < 100000:\n fact = 10000\n else:\n fact = 100000\n\n return x if x % fact == 0 else x + fact - x % fact", "def divPow2RoundUp(x, n):\n\treturn (x + (1L << n) - 1) / (1L << n);", "def sum_divisible_by(n, max_val=999):\n return n * (max_val/n) * (max_val/n + 1) / 2", "def haut(self):\n self.__x += 1\n if self.__x > 10:\n self.__x = 10", "def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum", "def five():\r\n \r\n n = 20\r\n divisible = False\r\n \r\n while divisible == False:\r\n n += 20\r\n divisible = True\r\n for i in range(20, 0, -1):\r\n if n % i != 0:\r\n divisible = False\r\n break\r\n return n", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def lastndigits(n, p):\n return p % 10**n", "def multiply_by_4(x):\n\treturn int(x) * 4", "def oddceil(x):\r\n\r\n return oddround(x + 1)", "def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1", "def get_next_interval_divisor() -> typing.Generator[int, None, None]:\n yield 5\n yield 10\n yield 25\n yield 50\n i = 1\n while True:\n i += 1\n yield 10**i" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns this user-readable string into an Alternative (no escaping)
def from_str(cls, encstr: str) -> 'Alternative':
    encstr = re.sub(r'\s+', '', encstr)
    return cls(*re.split('([' + string.punctuation + '])', encstr, maxsplit=1))
[ "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def read_alt(self, alt_string):\n match = self.alt_pattern.match(alt_string)\n if not match:\n raise SyntaxError(\"One of the ALT lines is malformed: %s\" % alt_string)\n\n alt = _Alt(match.group(\"id\"), match.group(\"desc\"))\n\n return (match.group(\"id\"), alt)", "def _alt(text):\n alt = text.split(',')\n return alt", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def random_alternative(self, fmt_string):\n # Find alternatives\n try:\n alts = self[fmt_string]\n except KeyError:\n # There are no alternatives for this string\n return fmt_string\n return random.choice(alts)", "def _unconvert_str(self, value):\n return self._unconvert_default(value)", "def unicodise_safe(string, encoding = None):\n\n\treturn unicodise(deunicodise(string, encoding), encoding).replace(u'\\ufffd', '?')", "def unescape_copy(val):\r\n if val == r\"\\N\":\r\n return None\r\n return unescape(val)", "def escapeDecode(s: unicode) -> unicode:\n ...", "def _unquote_or_none(s):\r\n if s is None:\r\n return s\r\n return escape.url_unescape(s, encoding=None, plus=False)", "def __init__(self, string):\n\n\t\tself.unnormal = string\n\t\tself.normal = self._normalize(string)", "def rl_unescape_prompt(prompt: str) -> str:\n if rl_type == RlType.GNU:\n escape_start = \"\\x01\"\n escape_end = \"\\x02\"\n prompt = prompt.replace(escape_start, \"\").replace(escape_end, \"\")\n\n return prompt", "def _sanitize_string_for_python(self, s):\n s = repr(s)\n\n if s.startswith('u'):\n s = s[1:]\n\n return s", "def raw(text):", "def normalize(self, text: str) -> str:", "def raw(self, txt_unRaw):", "def optionxform(self, optionstr):\r\n return optionstr", "def smart_replace(line):\n\n # start with simple substitutions\n line = line.replace(\"--\", EM_DASH)\n line = line.replace(' ', ' ')\n line = line.replace(' ', ' ')\n\n # now parse more contextual ones\n italics_on = False\n index = 0\n while index < len(line):\n if line[index] == \"'\":\n if char_weight(line, index - 1) < char_weight(line, index + 1):\n line = substitute(line, index, SINGLE_LEFT_QUOTE)\n index += len(SINGLE_LEFT_QUOTE)\n else:\n line = substitute(line, index, SINGLE_RIGHT_QUOTE)\n index += len(SINGLE_RIGHT_QUOTE)\n elif line[index] == '\"':\n if char_weight(line, index - 1) < char_weight(line, index + 1):\n line = substitute(line, index, DOUBLE_LEFT_QUOTE)\n index += len(DOUBLE_LEFT_QUOTE)\n else:\n line = substitute(line, index, DOUBLE_RIGHT_QUOTE)\n index += len(DOUBLE_RIGHT_QUOTE)\n elif line[index] == '*':\n if italics_on:\n line = substitute(line, index, r'\\i0 ')\n index += len(r'\\i0 ')\n else:\n line = substitute(line, index, r'\\i ')\n index += len(r'\\i ')\n italics_on = not italics_on\n else:\n index += 1\n if italics_on:\n line += r'\\i0 '\n return line", "def test_remove_alt_codes(self):\n\n cleaner = Cleaner(remove_alt_codes=True)\n\n text = 'Our prediction based on #FIFA Rankings, &amp; Country Risk Ratings'\n self.assertEqual('Our prediction based on #FIFA Rankings, Country Risk Ratings', cleaner.clean(text))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pull a Restriction from an encoded string and return the remainder
def decode(cls, encstr: str) -> Tuple['Restriction', str]:
    alts = []
    while len(encstr) != 0:
        if encstr.startswith('&'):
            encstr = encstr[1:]
            break
        alt, encstr = Alternative.decode(encstr)
        alts.append(alt)
    return cls(alts), encstr
[ "def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n return ret", "def parse_mask(string):\n return string.split(' = ')[1]", "def decode(self, s: str) -> [str]:", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def decode_string(self, value):\r\n return value", "def restriction_from_str(self, srest, dest=None, *, verify=True):\n if dest is None:\n dest = np.empty(self.num_strats, bool)\n else:\n utils.check(dest.dtype.kind == \"b\", \"dest dtype must be boolean\")\n utils.check(\n dest.shape == (self.num_strats,), \"dest shape must be num strats\"\n )\n dest.fill(False)\n\n role = None\n for line in srest.split(\"\\n\"):\n if line[0] != \" \":\n role = line[:-1]\n else:\n dest[self.role_strat_index(role, line[4:])] = True\n utils.check(\n not verify or self.is_restriction(dest),\n '\"{}\" does not define a valid restriction',\n srest,\n )\n return dest", "def minisat_decode(clause_str):\n factor = ClauseVariable.encoding_factor()\n int_value = int(clause_str)\n compliment = (int_value < 0)\n int_value = abs(int_value)\n position = (int_value % factor) -1\n vertex = math.ceil(int_value/factor)-1\n return ClauseVariable(compliment,vertex,position)", "def decode_string(self, value):\n\t\treturn value", "def restriction_from_repr(self, rrest, dest=None, *, verify=True):\n if dest is None:\n dest = np.empty(self.num_strats, bool)\n else:\n utils.check(dest.dtype.kind == \"b\", \"dest dtype must be boolean\")\n utils.check(\n dest.shape == (self.num_strats,), \"dest shape must be num strats\"\n )\n dest.fill(False)\n for role_str in rrest.split(\";\"):\n role, strats = (s.strip() for s in role_str.split(\":\", 1))\n for strat in strats.split(\",\"):\n dest[self.role_strat_index(role, strat.strip())] = True\n utils.check(\n not verify or self.is_restriction(dest),\n '\"{}\" does not define a valid restriction',\n rrest,\n )\n return dest", "def _decode_word(self, word: str) -> str:\n if len(word) <= 3 or len(set(word[1:-1])) == 1:\n return word\n possible_words = filter(\n lambda original_word: original_word.startswith(word[0])\n and original_word.endswith(word[-1]),\n self.original_words,\n )\n for possible_word in possible_words:\n if self._check_if_middle_part_chars_equal(possible_word, word):\n return possible_word\n return word", "def _mb_substr(string, start, length):\n return string.decode(_ENCODING)[start: start + length]", "def decode_result(found):\n ...", "def auth_sub_string_from_body(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Token='):\n # Strip off Token= and return the token value string.\n return response_line[6:]\n return None", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == -1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def decode(self, encoded):", "def restrictionFromIndex(self, index):\r\n\r\n if index.isValid():\r\n return unicode(self._storedSearchQueries[index.row()][1])\r\n return \"\"", "def _decode_octet_string(self, bytes):\n return bytes", "def decode_message(message):", "def extract_from_string( string, prefix ):\n # regex template\n 
template = \"(?<=%s)\\d+\"\n if type(prefix) is list:\n s = '|'.join([template % p for p in prefix])\n else:\n s = template % prefix\n\n # im going to let this fail \n val = re.search( s, string ).group()\n return val" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Restriction from an escaped string (ignoring whitespace)
def from_str(cls, encstr: str) -> 'Restriction':
    encstr = re.sub(r'\s+', '', encstr)
    ret, remainder = cls.decode(encstr)
    if len(remainder) != 0:
        raise ValueError("Restriction had extra characters at end: {}"
                         .format(remainder))
    return ret
[ "def format(self, y):\n try:\n if isinstance(y, RestrictionType):\n return y\n elif isinstance(eval(str(y)), RestrictionType):\n return eval(y)\n except (NameError, SyntaxError):\n pass\n raise ValueError(\"%s is not a RestrictionType\" % y.__class__)", "def get_prevent_regex(prevent_str):\n prevent = []\n if prevent_str == None: return prevent\n for p in prevent_str.split(\",\"):\n p = p.strip()\n current = []\n for c in p:\n t = \"(\" + \"|\".join(list(IUPAC[c])) + \")\"\n current.append(t)\n prevent.append(\"\".join(current))\n #print(prevent)\n return prevent", "def restriction_from_str(self, srest, dest=None, *, verify=True):\n if dest is None:\n dest = np.empty(self.num_strats, bool)\n else:\n utils.check(dest.dtype.kind == \"b\", \"dest dtype must be boolean\")\n utils.check(\n dest.shape == (self.num_strats,), \"dest shape must be num strats\"\n )\n dest.fill(False)\n\n role = None\n for line in srest.split(\"\\n\"):\n if line[0] != \" \":\n role = line[:-1]\n else:\n dest[self.role_strat_index(role, line[4:])] = True\n utils.check(\n not verify or self.is_restriction(dest),\n '\"{}\" does not define a valid restriction',\n srest,\n )\n return dest", "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def restriction_from_repr(self, rrest, dest=None, *, verify=True):\n if dest is None:\n dest = np.empty(self.num_strats, bool)\n else:\n utils.check(dest.dtype.kind == \"b\", \"dest dtype must be boolean\")\n utils.check(\n dest.shape == (self.num_strats,), \"dest shape must be num strats\"\n )\n dest.fill(False)\n for role_str in rrest.split(\";\"):\n role, strats = (s.strip() for s in role_str.split(\":\", 1))\n for strat in strats.split(\",\"):\n dest[self.role_strat_index(role, strat.strip())] = True\n utils.check(\n not verify or self.is_restriction(dest),\n '\"{}\" does not define a valid restriction',\n rrest,\n )\n return dest", "def test_re_handling_escape_char(self):\n string_for_regex = '%sing'\n regex = re_handling.ReHandling(string_for_regex)\n self.assertEqual(regex.pattern.pattern, '\\\\' + string_for_regex, 'regex test failed')", "def restricted_string_type(\n name: str,\n regex: Union[str, Pattern],\n docstring: Optional[str] = None,\n) -> Type:\n if isinstance(regex, str):\n regex = re.compile(regex)\n expression = 'matching '+regex.pattern\n\n extra_attrs = {\n '_regex': regex,\n '_expression': expression,\n '_type': str,\n }\n\n def check_value(cls, v):\n if not cls._regex.match(v):\n raise ValueError('invalid value, \"'+v+'\" does not match regular expression '+cls._regex.pattern)\n\n return create_type(\n name=name,\n base_type=str,\n check_value=check_value,\n register_key=(expression, str),\n docstring=docstring,\n extra_attrs=extra_attrs\n )", "def _StringEscape(self, string='', match='', **unused_kwargs):\n if match.group(1) not in '\\\\\\'\"rnbt\\\\.ws':\n raise errors.ParseError('Invalid escape character {0:s}.'.format(string))\n\n decoded_string = codecs.decode(string, 'unicode_escape')\n return self._StringExpand(string=decoded_string)", "def parseRestrictions(restrictions):\n if not restrictions:\n return None, None\n editRestriction = None\n moveRestriction = None\n editLockMatch = re.search('edit=([^:]*)', restrictions)\n if editLockMatch:\n editRestriction = editLockMatch.group(1)\n moveLockMatch = re.search('move=([^:]*)', restrictions)\n if moveLockMatch:\n moveRestriction = moveLockMatch.group(1)\n if restrictions == 
'sysop':\n editRestriction = 'sysop'\n moveRestriction = 'sysop'\n return editRestriction, moveRestriction", "def _like_protect(self, string, escape_char='\\\\'):\n\n return string.replace('_', '%s_' % escape_char)\\\n .replace('%', '%s%%' % escape_char).replace(escape_char, escape_char*2)", "def literal(value):\n return EscapedString(value, True)", "def restricted_string_type(\n name: str,\n regex: Union[str, Pattern],\n docstring: Optional[str] = None,\n) -> type:\n if isinstance(regex, str):\n regex = re.compile(regex)\n expression = \"matching \" + regex.pattern\n\n extra_attrs = {\n \"_regex\": regex,\n \"_expression\": expression,\n \"_type\": str,\n }\n\n def check_value(cls, v):\n if not cls._regex.match(v):\n raise ValueError(f\"{v} does not match regular expression {cls._regex.pattern}\")\n\n return create_type(\n name=name,\n base_type=str,\n check_value=check_value,\n register_key=(expression, str),\n docstring=docstring,\n extra_attrs=extra_attrs,\n )", "def _get_legal(token):\n valid = re.split(r'[^]a-zA-Z0-0![,. {}@#$%^&*-_+=;:<>?/~\\'\\\\`]', token)\n return ''.join(valid).strip()", "def test_literal_string_annotation(annotation: str, expected: str) -> None:\n stmt, = ast.parse(annotation).body\n assert isinstance(stmt, ast.Expr)\n unstringed = astutils._AnnotationStringParser().visit(stmt.value)\n assert astor.to_source(unstringed).strip() == expected", "def test_literal_string_annotation(annotation: str, expected: str) -> None:\n stmt, = ast.parse(annotation).body\n assert isinstance(stmt, ast.Expr)\n unstringed = astbuilder._AnnotationStringParser().visit(stmt.value)\n assert astor.to_source(unstringed).strip() == expected", "def sanitize(string) -> str:\n return '\"'+string+'\"'", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def eval_cast(string):\n\n return W.string_eval_expression(string, {}, {}, {})", "def ensure_quotes(s):\n return '\"{}\"'.format(s) if not s.isalnum() else s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the restrictions against the given values dict. Normally values are treated as strings, but conditions only work if they're actually integers. Returns (True, '') if everything is good; otherwise returns (False, reason string)
def are_restrictions_met(self, values: Dict[str, Any]) -> Tuple[bool, str]:
    for r in self.restrictions:
        reasons = r.test(values)
        if reasons is not None:
            return False, reasons
    return True, ''
[ "def check_condition(cond_list, values_dict):\n key = cond_list[0]\n compare_str = cond_list[1]\n number = int(cond_list[2])\n\n if key not in values_dict:\n values_dict[key] = 0\n if compare_str == \">\":\n return values_dict[key] > number\n elif compare_str == \"<\":\n return values_dict[key] < number\n elif compare_str == \">=\":\n return values_dict[key] >= number\n elif compare_str == \"<=\":\n return values_dict[key] <= number\n elif compare_str == \"!=\":\n return values_dict[key] != number\n return values_dict[key] == number", "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk", "def compare_param_values(value_1: Any, value_2: Any) -> bool:\n data = {'value_1': value_1, 'value_2': value_2}\n for key, value in data.items():\n if isinstance(value, str):\n try:\n data[key] = float(value)\n except ValueError:\n ...\n if isinstance(value, float) and value.is_integer():\n data[key] = int(value)\n return data['value_1'] == data['value_2']", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def checkvalue( key, value, exceptions = {}):\n import racy.renv.configs.allowedvalues as vars\n if exceptions.get(key, Undefined) == value:\n return True\n v = getattr(vars, key, Undefined)\n return v == Undefined or value in v", "def _check_count_and_values(\n configuration: Dict,\n value_count: int,\n values: List[int],\n count_name: str,\n values_name: str,\n max_count: int,\n):\n if count_name in configuration and values_name in configuration:\n if len(values) != value_count:\n raise ValueError(\n f\"Both {count_name} and {values_name} are defined but they are inconsistent. \"\n f\"{count_name} is {value_count} while {values_name} has length {len(values)}. \"\n )\n if values:\n if [d for d in values if d not in range(0, max_count)]:\n raise ValueError(\n f\"{values_name} contains draws outside of 0-{max_count - 1}: \"\n f\"{[d for d in values if d not in range(0, max_count)]}\"\n )\n if value_count < 1 or value_count > max_count:\n raise ValueError(f\"{count_name} must be within 1-{max_count}. 
Given: {value_count}\")", "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def _validate_values(self, to_increment, to_decrement):\n rv = self.restricted_values\n valid_increment = (to_increment not in rv) & ((to_increment + self.granularity) not in rv)\n valid_decrement = (to_decrement not in rv) & ((to_decrement - self.granularity) not in rv)\n return valid_decrement & valid_increment", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def _check_values(self, values):\n mod = []\n for k, v in values.items():\n if isinstance(v, str) and \"'\" in v:\n mod.append(k)\n if len(mod) == 0:\n return values\n else:\n values = copy.copy(values)\n for k in mod:\n values[k] = values[k].replace(\"'\", \"''\")\n return values", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def _check_users(users):\n\n messg = \"\"\n valid = True\n\n for user, user_details in users.items():\n if not user_details:\n valid = False\n messg += \"Please provide details for username {user}.\\n\".format(user=user)\n continue\n if not (\n isinstance(user_details.get(\"level\"), int)\n or 0 <= user_details.get(\"level\") <= 15\n ):\n # warn!\n messg += (\n \"Level must be a integer between 0 and 15 for username {user}. Will\"\n \" assume 0.\\n\".format(user=user)\n )\n\n return valid, messg", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def test_validate_allowed_values() -> None:\n assert validate_allowed_values([], \"not_allowed\")\n assert not validate_allowed_values([\"allowed\"], \"not_allowed\")\n assert validate_allowed_values([\"allowed\"], \"allowed\")", "def check_value(self, key: str, value: Any):\n # Check the value with a set of tests\n self._check_missing(key, value)\n self._check_allowed_values(key, value)\n self._check_data_type(key, value)\n self._check_value_range(key, value)", "def _value_error(cond, str):\n if not cond:\n raise ValueError(str)", "def check_var_valid_value(var, values, err_msg=None):\n\n if var not in values:\n if err_msg is not None:\n err_msg = f\"The value specified in var = {var} is not supported.\"\n print_err_msg_exit(\n err_msg + f\"{var} must be set to one of the following:\\n {values}\"\n )\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All-in-one check that a runestring is valid, derives from this MasterRune, and passes all its conditions against the given dictionary of values or callables
def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
    try:
        rune = Rune.from_base64(b64str)
    except:  # noqa: E722
        return False, "runestring invalid"
    if not self.is_rune_authorized(rune):
        return False, "rune authcode invalid"
    return rune.are_restrictions_met(values)
[ "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def valid_char(self, char):\n if char.lower() in self.data.keys():\n return True\n return False", "def valid_mapping_entry(text):\n if len(text) > 1:\n # Cannot be more than 1 long\n return False\n if len(text) == 0:\n # Allow empty (no mapping)\n return True\n # Otherwise check if it is a letter\n chr_code = ord(text)\n return 65 <= chr_code <= 90 or 97 <= chr_code <= 122", "def is_key_valid(self,key):\n if not key or any(map(lambda s: s in key,space_chars))\\\n or any(map(lambda s: s in key,bad_chars)):\n return False \n return True", "def enchant_check(self, arg):\n x = self.dictionary.check(arg)\n return x", "def validate_string_is_lookup(v: Any) -> Any:\n if isinstance(v, str) and not re.match(RUNWAY_LOOKUP_STRING_REGEX, v):\n raise RUNWAY_LOOKUP_STRING_ERROR\n return v", "def test_instruction_from_rule_succes():\n valid_rule = {'instruction_in_valid_rule': 'test'}\n assert DictValidator._instruction_from_rule(valid_rule) == \\\n 'instruction_in_valid_rule'", "def validate(self, ve: ValidatingEntry, value: str):\n unused(ve)\n\n if self.ro:\n return True\n try:\n self.parse(value)\n except ValueError as err:\n return str(err)\n return True", "def validate_letter_input(letter):\n\n # verify the input is of type string\n if type(letter) is not str:\n raise Exception('{} is not a string'.format(letter))\n # verify the input is one of the characters that the RNN can process\n if letter not in char_to_i.keys():\n raise Exception('{} is not a lowercase letter or the underscore'.format(letter))", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def test_should_raise_in_case_of_wrong_characters(self):\n validator = CharCombinationValidator()\n\n regex = re.compile(r'[\\(\\[\\{]\\)\\]\\}')\n forbidden_chars = regex.sub('', punctuation)\n for char in forbidden_chars:\n with self.assertRaises(FormulaValidationError):\n validator('Fe(O)2%s' % char)", "def _check_code_string(value: str) -> None:\n if not isinstance(value, str):\n raise TypeError('Invalid type for a code string.')\n\n if re.match(r'[A-Z0-9_ ]{1,16}$', value) is None:\n raise ValueError(\n 'Code string must contain between 1 and 16 characters that are '\n 'either uppercase letters, numbers, spaces, or underscores.'\n )\n\n if re.match(r'[0-9 _]{1}.*', value) is not None:\n raise ValueError(\n 'Code string must not start with a number, space, or underscore.'\n )\n\n if re.match(r'.*[_ ]$', value) is not None:\n raise ValueError(\n 'Code string must not end with a space or underscore.'\n )", "def _landcover_string_validity(self, string):\n if string in FROM_GLC_LEGEND.keys():\n return True\n elif string == 'Random':\n return True\n else:\n return False", "def validate(cls, tab_dict, raise_error=True):\r\n return key_checker(['type'])(tab_dict, raise_error)", "def _validate(self, currency_code, rate_date, buying_rate, selling_rate,\n median_rate, unit_value):\n for var, regex in self.RE_VALIDATION_DICT.items():\n if re.match(regex, eval(var)) is None:\n raise ValueError('{}: wrong format'.format(var))\n\n if unit_value not in [1, 
100]:\n raise ValueError('unit_value must be 1 or 100')", "def e_monossilabo(chars):\r\n if type(chars) != str:# Verifica se e string\r\n raise ValueError(\"e_monossilabo:argumento invalido\")\r\n tam = len(chars)\r\n # Dependendo do tamanho da string, averigua se e um monossilabo valido\r\n if tam == 1:\r\n if chars[0] in vogal_palavra:\r\n return True\r\n else:\r\n return False\r\n elif tam == 2:\r\n return e_monossilabo2(chars)\r\n elif tam == 3:\r\n return e_monossilabo3(chars)\r\n else:\r\n return False", "def test_literal(traverser, wrapper):\n value = wrapper.get_literal_value()\n if isinstance(value, basestring):\n # Local import to prevent import loop.\n from validator.testcases.regex import validate_string\n validate_string(value, traverser, wrapper=wrapper)", "def check_eval_str(s):\n bad_strings = ('import', 'os.', 'sys.', '.__', '__.')\n for bad_s in bad_strings:\n if bad_s in s:\n raise ValueError('Will not eval() string which contains \"{}\": {}'\n .format(bad_s, s))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function that checks that the b64str runestring is valid, derives from our secret, and passes against these values. If you want to check many runes, it's more efficient to create the MasterRune first and then check them, but this is fine if you're only checking one.
def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
    return MasterRune(secret).check_with_reason(b64str, values)
[ "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def main_validate(seed_input,valid,seed_input_orig):\n from itertools import permutations\n# print(\"Inside main_validate :\", seed_input)\n m = mnemonic.Mnemonic('english')\n test = True\n subset = []\n for subset in permutations(seed_input, len(seed_input)):\n#\t print (' '.join(subset))\n#\t print (len(subset), len(seed_input))\n if len(subset) == len(seed_input):\n if m.check(' '.join(subset)):\n if subset != seed_input:\n result = ' '.join(subset)\n#\t\t\tprint (subset[0], seed_input_orig[0], subset[-1], seed_input_orig[-1])\n\t\t\tif ((subset[0] == seed_input_orig[0]) and (subset[-1]== seed_input_orig[-1])):\n# this conditional is because I know my original passphrase started with and end with the right word thus\n# the correct crc. I do not want to evaluate the other possiblilities to discover my correct passphrase\n\t\t\t\tprint (result)\n\t\t\tvalid = True\n else:\n if subset == seed_input:\n\t\t\t\ttest = False\n\t\t\t\tvalid = False\n\t\t\tprint \"There was a problem with the words you gave, maybe they are not on the bip39 word list or the number of words does not work.\"\n break # found a valid one, stop looking.\n return seed_input", "def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def test_is_valid_user_id_is_false_for_alphabetic_content(self):\n # YWJj = base64(abc)\n self.assertFalse(TokenRemover.is_valid_user_id('YWJj'))", "def _check_code_string(value: str) -> None:\n if not isinstance(value, str):\n raise TypeError('Invalid type for a code string.')\n\n if re.match(r'[A-Z0-9_ ]{1,16}$', value) is None:\n raise ValueError(\n 'Code string must contain between 1 and 16 characters that are '\n 'either uppercase letters, numbers, spaces, or underscores.'\n )\n\n if re.match(r'[0-9 _]{1}.*', value) is not None:\n raise ValueError(\n 'Code string must not start with a number, space, or underscore.'\n )\n\n if re.match(r'.*[_ ]$', value) is not None:\n raise ValueError(\n 'Code string must not end with a space or underscore.'\n )", "def validate_admin (admin_secret):\n\n try:\n admin_secret = admin_secret.encode()\n hashed = app.config['ADMIN_SECRET'].encode()\n return bcrypt.checkpw(admin_secret, hashed)\n\n except Exception as e:\n return False", "def validate_db_admin (db_secret):\n\n try:\n db_secret = db_secret.encode()\n hashed = app.config['DB_SECRET'].encode()\n return bcrypt.checkpw(db_secret, hashed)\n except Exception as e:\n return False", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n 
expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def legal_password(s):\n pass", "def mnemonic_is_valid(mnemonic: str, wordlist=WORDLIST):\n try:\n mnemonic_to_bytes(mnemonic, wordlist=wordlist)\n return True\n except Exception as e:\n return False", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def code_eight(entry):\r\n if len(entry) == 8 and (entry == \"AA9A 9AA\" or entry == \"AA99 9AA\"):\r\n return True\r\n else:\r\n return False", "def verify(string, base=10, decoder=decimal_decoder):\n\n return luhn_sum_mod_base(string, base=base, decoder=decoder) == 0", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def validate_topt_secret(secret, code):\n totp = pyotp.TOTP(secret)\n return totp.verify(code.replace(\" \", \"\"))", "def isValidCode(code):\n for character in code:\n if len(code) == 3 and character in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n return True\n else:\n return False", "def verify_input(input_hash):\n from re import match\n if len(input_hash) != 32:\n verification = False\n return verification\n else:\n if match(\"^[a-f0-9]*$\", input_hash):\n verification = True\n return verification\n else:\n verification = False\n return verification" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse read and quality strings from a FASTQ file with sequencing reads.
def readFastq(filename):
    sequences = []
    qualities = []
    with open(filename) as fh:
        while True:
            fh.readline()  # skip name line
            seq = fh.readline().rstrip()  # read base sequence
            fh.readline()  # skip placeholder line
            qual = fh.readline().rstrip()  # base quality line
            if len(seq) == 0:
                break
            sequences.append(seq)
            qualities.append(qual)
    return sequences, qualities
[ "def readFastq(filename):\n sequences = []\n qualities = []\n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() # read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n return sequences, qualities", "def readFastq(filename):\n sequences = []\n qualities = []\n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() # read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n return sequences, qualities", "def readFastq(filename):\n\tsequences = []\n\tqualities = []\n\twith open(filename, 'r') as f:\n\t\twhile True: \n\t\t\tf.readline() # skip name line\n\t\t\tseq = f.readline().rstrip()\n\t\t\tf.readline() # skip place holder line \n\t\t\tq = f.readline().rstrip()\n\t\t\tif len(seq) ==0:\n\t\t\t\tbreak \n\t\t\tsequences.append(seq)\n\t\t\tqualities.append(q)\n\treturn sequences, qualities", "def read_fastq(file_name):\r\n sequences = []\r\n qualities = []\r\n\r\n with open(file_name) as in_file:\r\n while True:\r\n # skip header for now\r\n in_file.readline()\r\n # get the sequence data\r\n seq = in_file.readline().rstrip()\r\n # skip the + sign\r\n in_file.readline()\r\n # read in quality data\r\n qual = in_file.readline().rstrip()\r\n\r\n #exit at the end of the file\r\n if len(seq) == 0:\r\n break\r\n\r\n sequences.append(seq)\r\n qualities.append(qual)\r\n\r\n return sequences, qualities", "def read_fasta_quality(quality_file):\n qual_fast = fast()\n for line in quality_file:\n if line.startswith('>'):\n #quality files also have identifiers, matching the sequences they apply to\n #this can be used to seperate scores\n if qual_fast.iden == None:\n qual_fast.iden = line[1:]\n continue\n else:\n yield(qual_fast.qual)\n qual_fast.iden = line[1:]\n qual_fast.qual = []\n continue\n else:\n for score in line.split():\n #FASTA quality files have white spaces, must use split or will store as strings\n qual_fast.qual+=[int(score)]\n continue\n\n if qual_fast.iden is not None:\n yield(qual_fast.qual)", "def _getFastQ( file,qseq,minLen=0,qualityTh=0,ASCII_offset=64 ):\n ##GERALD (QSEQ)\n if qseq:\n qseq=file.next()[:-1]\n \n qseq_element=qseq.split('\\t') #SOLEXA 90403 4 1 23 1566 0 1 ACCGCTCTCGTGCTCGTCGCTGCGTTGAGGCTTGCG `aaaaa```aZa^`]a``a``a]a^`a\\Y^`^^]V` 1\n if len(qseq_element)!=11 or qseq_element[-1]!='1': \n return\n \n #formatting\n name = '@%s:%s:%s:%s:%s#%s/%s' % ( qseq_element[0],qseq_element[2],qseq_element[3],qseq_element[4],qseq_element[5],qseq_element[6],qseq_element[7] )\n seq,quals = qseq_element[8],qseq_element[9]\n \n #clip seq & quals @ . ( unknown base )\n seq = _clipSeq( seq,quals,minLen,'.' 
)\n\n ##FASTQ\n else:\n name = file.next()[:-1]\n seq = file.next()[:-1]\n sep = file.next()[:-1]\n quals = file.next()[:-1]\n \n #format name - @HWI-ST227:145:C06RAACXX:7:1101:1156:2148 2:Y:0:AACT > @HWI-ST227:145:C06RAACXX:7:1101:1156:2148/2\n if len( name.split() ) > 1:\n name = '%s/%s' % ( name.split()[0],name.split()[1][0] )\n \n #clip seq & quals @ N ( unknown base )\n seq = _clipSeq( seq,quals,minLen,'N' )\n \n if not seq:\n return\n \n #return PHRED+33 quals (Sanger encoding)\n if ASCII_offset==64:\n quals=''.join( [ chr(ord(q)-31) for q in quals ] )\n \n #cut sequence & quals @ quality\n if qualityTh:\n pos=0\n for q in quals:\n phredQ=ord(q)-33 #PHRED+33 encoding\n if phredQ<qualityTh: \n seq,quals=seq[:pos],quals[:pos]\n if len(seq)<minLen or not seq: \n return\n break\n pos+=1\n \n #define fastQ line\n fastq='%s\\n%s\\n+\\n%s\\n' % ( name,seq,quals )\n \n return fastq", "def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()", "def parse(self):\n\n with open(self.fasta_file) as file:\n content = file.readlines()\n\n sequences = []\n sequence_ids = []\n sequence = []\n for line in content:\n if line.startswith('>'):\n sequence_ids.append(line.strip())\n if len(sequence) != 0:\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n sequence = []\n elif line.startswith(\"A\") or line.startswith(\"T\") or \\\n line.startswith(\"C\") or line.startswith(\"G\"):\n sequence.append(line.strip())\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n\n if len(sequences) > MAX_SEQUENCES:\n print WARNING_MAX_SEQUENCES_EXCEEDED\n\n return sequences, sequence_ids", "def readfq(fp): # this is a generator function\n last = None # this is a buffer keeping the last unprocessed line\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this line\n break\n if not last: break\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last: break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs); # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break", "def read_fastq(filename, strip_second_header=True):\n\n with open(filename) as fastq:\n line = fastq.readline()\n if not line.startswith(\"@\"):\n raise IOError(\"Not FASTQ format? 
First line didn't start with @\")\n while fastq:\n if line.startswith(\"@\"):\n header = line.rstrip()\n seq = fastq.readline().rstrip()\n second_header = fastq.readline()\n if strip_second_header:\n second_header = \"+\"\n scores = fastq.readline().rstrip()\n yield header, seq, second_header, scores\n elif line == \"\": # EOF\n yield header, seq, second_header, scores\n break\n line = fastq.readline()", "def processReads( fPaths,qseq,outDir,paired,storeUnpaired,minLen,qualityTh,combinedFastQ=False,ASCII_offset_33=True,replace=False,verbose=False,qseqEnds=('.txt','.gz') ):\n #define quality score type\n if ASCII_offset_33: ASCII_offset=33 #sanger\n else: ASCII_offset=64 #solexa/illumina\n #info\n if verbose: \n if qseq:\n print \"\\nConverting gerald file(s) %s into fastq and filtering...\" % fPaths, datetime.now()\n else:\n print \"\\nFiltering FastQ file(s) %s...\" % fPaths, datetime.now()\n #process geralds\n i=filterSkip=single=0\n ###\n #process paired-ends reads\n if paired: \n #define file names for output\n outFnameF =os.path.join( outDir,'q%s_1.fastq' % qualityTh )\n outFnameR =os.path.join( outDir,'q%s_2.fastq' % qualityTh )\n unpairedFname =os.path.join( outDir,'q%s.unpaired.fastq' % qualityTh )\n outCombinedFname=os.path.join( outDir,'q%s.combined.fastq' % qualityTh )\n #check if out file exists\n if replace: pass\n elif os.path.isfile( outFnameF ) or os.path.isfile( outFnameR ):\n print \" At least one of out files exist: %s or %s. Exitting.\" % ( outFnameF,outFnameR )\n return\n #open files for writting\n outFileF=open( outFnameF,'wb' )\n outFileR=open( outFnameR,'wb' )\n #open out file for unpaired reads\n if storeUnpaired: outUnpaired =open( unpairedFname,'wb' )\n else: outUnpaired =False\n #open out file for combined FastQ\n if combinedFastQ: combinedOutFile =open( outCombinedFname,'wb' )\n else: combinedOutFile =False\n #store pairs of filenames\n fnames_pair=[]\n #process all input files\n for fname in fPaths:\n fnames_pair.append(fname)#; print fnames_pair\n if len(fnames_pair)!=2: continue\n #get F and R qseq fnames\n fPathF,fPathR=fnames_pair \n #proces qseq files: GERALD->FASTA\n p_i,p_filterSkip,p_single = filterPaired( qseq,fPathF,fPathR,outFileF,outFileR,combinedOutFile,outUnpaired,minLen,qualityTh,ASCII_offset )\n #print info\n if verbose: \n print '',fnames_pair,p_i,p_filterSkip,datetime.now()\n #update read counts\n i +=p_i\n filterSkip +=p_filterSkip\n single +=p_single\n #reset fnames\n fnames_pair=[]\n #close outfiles\n outFileF.close(); outFileR.close()\n #store optional out files\n if storeUnpaired: outUnpaired.close()\n if combinedFastQ: combinedOutFile.close()\n #print info for run\n print '\\tProcessed pairs: %s. Filtered: %s. Reads pairs included: %s [%.2f%s]. Singletons: %s [%.2f%s]' % ( i,filterSkip,(i-filterSkip),(i-filterSkip)*100.0/i,'%',single,single*100.0/i,'%' )\n \n ####\n #for single reads\n else: \n #define out fname\n outFnameF=os.path.join( outDir,'q%s.fastq' % ( qualityTh ) )\n #check if out file exists\n if replace: pass\n elif os.path.isfile( outFnameF ):\n print \" Out file exists: %s. 
Exitting.\" % ( outFnameF, )\n return\n #open files for writting\n outFileF=open( outFnameF,'wb' )\n #process all files as single reads\n for fPathF in fPaths: \n #proces qseq file: GERALD->FASTA\n p_i,p_filterSkip,p_single = filterSingle( qseq,fPathF,outFileF,minLen,qualityTh,ASCII_offset )\n #print info\n if verbose: print '',fPathF,p_i,p_filterSkip\n #update read counts\n i +=p_i\n filterSkip +=p_filterSkip\n #close outfile\n outFileF.close()\n #print info for run\n print '\\tProcessed reads: %s. Filtered: %s. Reads included: %s [%.2f%s].' % ( i,filterSkip,(i-filterSkip),(i-filterSkip)*100.0/i,'%' )", "def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)", "def FastqGGIterator(handle):\n handle_readline = handle.readline\n while True:\n line = handle_readline()\n if line == \"\" : return\n if line[0] == \"@\":\n break\n while True:\n title_lines = []\n seq_strings = []\n quality_strings = []\n count = 0\n while count < 2 :\n if line[0] != \"@\":\n raise ValueError(\"Bad formatting of record start lines.\")\n title_line = line[1:].rstrip()\n title_lines.append(title_line)\n seq_string = handle_readline().rstrip()\n seq_strings.append(seq_string)\n while True:\n line = handle_readline()\n if not line:\n raise ValueError(\"End of file without quality info.\")\n if line[0] == \"+\":\n second_title = line[1:].rstrip()\n if second_title and second_title != title_line:\n raise ValueError(\"Seq and qual captions differ.\")\n break\n seq_string += line.rstrip() #removes trailing newlines\n if \" \" in seq_string or \"\\t\" in seq_string:\n raise ValueError(\"Whitespace not allowed in the sequence.\")\n seq_len = len(seq_string)\n quality_string = handle_readline().rstrip()\n quality_strings.append(quality_string)\n while True:\n line = handle_readline()\n if not line : break #end of file\n if line[0] == \"@\":\n if len(quality_string) >= seq_len:\n break\n quality_string += line.rstrip()\n if seq_len != len(quality_string):\n raise ValueError(\"Lengths of seq and qual values differs \"\n \" for %s (%i and %i).\" \\\n % (title_line, seq_len, len(quality_string)))\n count +=1\n yield (title_lines, seq_strings, quality_strings)\n if not line : return #StopIteration at end of file\n assert False, \"Should not reach this line\"", "def parse_qual(infile, full_header=False):\n for rec in FastaFinder(infile):\n curr_id = rec[0][1:]\n curr_qual = ' '.join(rec[1:])\n try:\n parts = np.asarray(curr_qual.split(), dtype=int)\n except ValueError:\n raise RecordError(\n \"Invalid qual file. 
Check the format of the qual files.\")\n if full_header:\n curr_pid = curr_id\n else:\n curr_pid = curr_id.split()[0]\n yield (curr_pid, parts)", "def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta", "def fastq2Sequence(path2file):\n\tsequences = []\n\tfor bioSeqRecord_obj in SeqIO.parse(path2file,\"fastq\"):\n \t\tsequences.append(Sequence.Sequence(bioSeqRecord_obj))\n \treturn sequences", "def filter_fastq (fastq_file, spacer, gene_distance_from_umi):\n seq_to_umi = defaultdict(dict)\n regex_obj = re.compile(r'(?P<pre_umi>.+' + spacer + r')(?P<umi>.{10})(?P<oligo_dt>.{21})(?P<gene>.+)')\n with gzip.open(fastq_file, 'rb') as fh:\n for header in fh:\n seq = fh.readline()\n sep = fh.readline()\n ascii_line = fh.readline()\n [header, seq, sep, ascii_line] = [line.rstrip().decode('ascii') for line in\n [header, seq, sep, ascii_line]]\n match_obj = regex_obj.search(seq)\n if match_obj:\n offset_pos = len(match_obj.group('pre_umi')) + len(match_obj.group('umi')) + \\\n len(match_obj.group('oligo_dt'))\n print(header, seq[offset_pos:], sep, ascii_line[offset_pos:], sep = \"\\n\")\n seq_to_umi[header] = match_obj.group('umi')\n return seq_to_umi", "def parseFASTA(fastaFNH):\n recs = []\n seq = []\n seqID = \"\"\n descr = \"\"\n\n for line in file_handle(fastaFNH):\n line = line.strip()\n if line[0] == \";\":\n continue\n if line[0] == \">\":\n # conclude previous record\n if seq:\n recs.append(FASTARecord(seqID, descr, \"\".join(seq)))\n seq = []\n # start new record\n line = line[1:].split(None, 1)\n seqID, descr = line[0], line[1]\n else:\n seq.append(line)\n\n # catch last seq in file\n if seq:\n recs.append(FASTARecord(seqID, descr, \"\".join(seq)))\n return recs", "def read_fasta(filename):\n\n fasta_file = open(filename) # Open file at filename\n line = fasta_file.readline() # Read first line\n first = True # Boolean determining if it is the first sequence in file\n sequence = \"\" # Stores sequence information\n seqlist = [] # List to store tuples of seqid,sequence information\n while fasta_file:\n if first and line.startswith(\">\"):\n # If first occurrence of \">\", treat it a bit different\n sequence_identifier = line.rstrip()\n line = fasta_file.readline()\n first = False\n \n elif line.startswith(\">\") and not first:\n # Store complete (previous) sequence and id in tuple into list\n sequence = \"\".join(sequence.split('\\n')) # Remove embedded newlines\n seqlist.append((sequence_identifier,sequence)) \n \n sequence = \"\" # Reset sequence string\n sequence_identifier = line.rstrip() # Set new seqid\n line = fasta_file.readline()\n \n elif line == \"\": #EOF\n # Store complete (last) sequence and id in tuple into list\n sequence = \"\".join(sequence.split('\\n')) # Remove embedded newlines\n seqlist.append((sequence_identifier,sequence))\n break\n \n else: # This is sequence territory\n sequence = ''.join([sequence,line])\n line = fasta_file.readline()\n\n return seqlist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a hash map between kmers and reads.
def kmerHashMap(reads, k):
    kmers_dict = {}
    # loop through all reads
    for i in range(len(reads)):
        # loop read's bases, except for the last k, to obtain its kmers
        for j in range(1+len(reads[i])-k):
            kmer = reads[i][j:k+j]
            if kmers_dict.has_key(kmer):
                kmers_dict[kmer].add(i)
            else:
                kmers_dict[kmer] = set([i])
    return kmers_dict
[ "def create_hash_map(self):\n map = {}\n for brand in self.brands:\n for beer in brand.beers:\n key = round(beer.price, -2)\n if key in map:\n map[key].append(beer)\n else:\n map[key] = [beer]\n return map", "def kmers(self):\n return []", "def kmer_hd_dict(kmer):\n hd_dict = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: []}\n for kmer2 in all_kmers:\n hd = hamming_distance(kmer, kmer2)\n hd_dict[hd].append(kmer2)\n for key in hd_dict:\n hd_dict[key] = set(hd_dict[key])\n return hd_dict", "def get_kmer_frequencies(kmers):\r\n frequencies = defaultdict(lambda: 0)\r\n for kmer in kmers:\r\n frequencies[kmer] += 1\r\n return frequencies", "def wafer_map(self):\n result = OrderedDict()\n\n tube_to_tele = dict()\n for tele, props in self.data[\"telescopes\"].items():\n for tb in props[\"tubes\"]:\n tube_to_tele[tb] = tele\n\n wafer_to_tube = dict()\n for tb, props in self.data[\"tubes\"].items():\n for wf in props[\"wafers\"]:\n wafer_to_tube[wf] = tb\n\n crate_to_card = dict()\n for crate, props in self.data[\"crates\"].items():\n for card in props[\"cards\"]:\n crate_to_card[card] = crate\n\n result[\"cards\"] = {x: y[\"card\"]\n for x, y in self.data[\"wafers\"].items()}\n result[\"crates\"] = {x: crate_to_card[y[\"card\"]]\n for x, y in self.data[\"wafers\"].items()}\n result[\"bands\"] = {x: y[\"bands\"]\n for x, y in self.data[\"wafers\"].items()}\n result[\"tubes\"] = wafer_to_tube\n result[\"telescopes\"] = {x: tube_to_tele[wafer_to_tube[x]] for x in\n list(self.data[\"wafers\"].keys())}\n return result", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def _build_sample_map(self):\n result = {}\n rows = [\n (lane, lib.name)\n for lib in self.flow_cell.libraries\n for lane in lib.lanes]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = 'S{}'.format(i)\n i += 1\n return result", "def _make_hash(self):\n my_hash = {}\n for feature_id, feature in self.feature_dict.items():\n bins = self._get_hash_bins(feature)\n chrom = feature.spanning_segment.chrom\n strand = feature.spanning_segment.strand\n if chrom not in my_hash.keys():\n my_hash[chrom] = {}\n my_hash[chrom][\"+\"] = {}\n my_hash[chrom][\"-\"] = {}\n for b in bins:\n try:\n my_hash[chrom][strand][b].append(feature_id)\n except KeyError:\n my_hash[chrom][strand][b] = [feature_id]\n return my_hash", "def get_results_for_init(self):\n return dict(init=self.centroids, n_clusters=self.centroids.shape[0])", "def meters(self):\n return {**self.prepend_name_dict(self._prefixes[0], self._train_metrics.meters),\n **self.prepend_name_dict(self._prefixes[1], self.validator.meters)}", "def build_map(model: str, n: int, kwc: int) -> Map:\n PKWS.clear()\n fited = cluster(n, model)\n return Map(\n cats=list(map(\"c-{}\".format, range(1, n + 1))),\n kws=list(\n map(\n lambda c: \", \".join(\n map(\n lambda x: x[0],\n count_it(\n Counter(\n chain.from_iterable(\n map(\n lambda ie: model == \"bert\"\n and SS_BERT.get(YS[model][ie[0]], [])\n or model == \"glove\"\n and SS_TFIDF[ie[0]]\n or SS_GLOVE[ie[0]],\n filter(\n lambda ie: ie[1] == c,\n enumerate(fited),\n ),\n ),\n )\n ),\n kwc,\n ),\n )\n ),\n range(n),\n )\n ),\n points=list(\n map(\n lambda y, x_y, x: Point(\n question=y, x=x_y[0], y=x_y[1], catagory=x,\n ),\n YS[model],\n XY[model],\n fited,\n )\n ),\n )", "def _build_topic_to_range_map(watermarks):\n return {\n topic: {\n partition: {\n 'high': 
marks.highmark,\n 'low': marks.lowmark\n }\n for partition, marks in watermarks_map.items()\n }\n for topic, watermarks_map in watermarks.items()\n }", "def ewriters():\n return dict(_ewriters)", "def _build_topic_to_consumer_topic_state_map(watermarks):\n return {\n topic: ConsumerTopicState({\n partition: int((marks.highmark + marks.lowmark) / 2)\n for partition, marks in watermarks_map.items()\n }, None)\n for topic, watermarks_map in watermarks.items()\n }", "def build_map(id, num_of_rooms):\n rooms = {}\n room_count = 0\n while room_count < num_of_rooms:\n rooms[id] = get_room(id)\n id += 1\n room_count += 1\n return rooms", "def get_results_for_init(self):\n n_components = self.centroids.shape[0]\n return dict(means_init=self.centroids)", "def gen_beer_map(self):\n #print(\"\\n\\n\\n\\n BEER MAP\")\n d = Path.cwd()\n beer_map = csv.reader(open([f for f in d.parent.joinpath(\"data\").iterdir() if \"Beer_Style_Mapping.csv\" in f.name][0]))\n beer_map_dict = {}\n for beer, style in beer_map:\n beer_map_dict[beer] = style\n return beer_map_dict", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def get_clusters(self):\n dict_clusters = dict()\n predictions = self.get_predictions(self.data_train)\n df_cluster = pd.DataFrame({'target': self.target_train, 'cluster': predictions})\n total_read = df_cluster[df_cluster[\"target\"] == 1].shape[0]\n total_read_cluster0 = df_cluster[(df_cluster[\"target\"] == 1) & (df_cluster[\"cluster\"] == 0)].shape[0]\n if total_read_cluster0 > total_read/2:\n dict_clusters[\"read\"] = 0\n dict_clusters[\"notread\"] = 1\n else:\n dict_clusters[\"notread\"] = 0\n dict_clusters[\"read\"] = 1\n\n return dict_clusters" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the squared L2 norm of the pattern.
def l2_norm(pattern):
    return np.linalg.norm(pattern)
[ "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def l2_norm(params):\n flattened, _ = weights_flatten(params)\n return np.dot(flattened, flattened)", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def l2_norm(x):\n result = 0.0\n for i in range(x.shape[0]):\n result += x[i] ** 2\n return np.sqrt(result)", "def L_2_norm(matrix):\n return np.linalg.norm(matrix)", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def L2norm(arr):\n return sqrt(add.reduce(arr**2))", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def l2(vec):\n return np.linalg.norm(vec)", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def l2_norm(self, element=None):\n import bempp.api\n from bempp.api.integration import gauss_triangle_points_and_weights\n import numpy as np\n\n # L2-Norm on the whole space\n if element is None:\n mass = bempp.api.operators.boundary.sparse.identity(\n self.space, self.space, self.space,\n parameters=self.parameters).weak_form().sparse_operator\n vec = self.coefficients\n return np.sqrt(np.abs(vec.conjugate().T.dot(mass.dot(vec))))\n\n # L2-Norm on a single element\n res = 0\n accuracy_order = self.parameters.quadrature.far.single_order\n points, weights = gauss_triangle_points_and_weights(accuracy_order)\n\n element_list = [element] if element is not None else list(\n self.grid.leaf_view.entity_iterator(0))\n\n for element in element_list:\n integration_elements = element.geometry.integration_elements(\n points)\n abs_surface_value_squared = np.sum(\n np.abs(self.evaluate(element, points))**2, axis=0)\n res += np.sum(abs_surface_value_squared *\n weights * integration_elements)\n\n return np.sqrt(res)", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def norm(self):\n return math.sqrt((self.__pow__(2)).quad())", "def norm_2(vector):\n return math.sqrt(vector.x ** 2 + vector.y ** 2 + vector.z ** 2)", "def L2_norm(self, mvals):\n if self.p == 2.0:\n mvals = np.array(mvals)\n DC0 = np.dot(mvals, mvals)\n DC1 = 2*np.array(mvals)\n DC2 = 2*np.eye(len(mvals))\n else:\n mvals = np.array(mvals)\n m2 = np.dot(mvals, mvals)\n p = float(self.p)\n DC0 = m2**(p/2)\n DC1 = p*(m2**(p/2-1))*mvals\n DC2 = p*(m2**(p/2-1))*np.eye(len(mvals))\n DC2 += p*(p-2)*(m2**(p/2-2))*np.outer(mvals, mvals)\n return DC0, DC1, DC2", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the l2 norm of a stack of patterns.
def l2_norm_batch(pattern_stack):
    return np.linalg.norm(pattern_stack, axis=0)
[ "def l2_norm(pattern):\n return np.linalg.norm(pattern)", "def l2_norm(params):\n flattened, _ = weights_flatten(params)\n return np.dot(flattened, flattened)", "def L2norm(arr):\n return sqrt(add.reduce(arr**2))", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def l2_norm(tensors):\n flattened = [tensor.as_tensor_variable(t).flatten() for t in tensors]\n flattened = [(t if t.ndim > 0 else t.dimshuffle('x'))\n for t in flattened]\n joined = tensor.join(0, *flattened)\n return tensor.sqrt(tensor.sqr(joined).sum())", "def L_2_norm(matrix):\n return np.linalg.norm(matrix)", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def L2_norm(self, mvals):\n if self.p == 2.0:\n mvals = np.array(mvals)\n DC0 = np.dot(mvals, mvals)\n DC1 = 2*np.array(mvals)\n DC2 = 2*np.eye(len(mvals))\n else:\n mvals = np.array(mvals)\n m2 = np.dot(mvals, mvals)\n p = float(self.p)\n DC0 = m2**(p/2)\n DC1 = p*(m2**(p/2-1))*mvals\n DC2 = p*(m2**(p/2-1))*np.eye(len(mvals))\n DC2 += p*(p-2)*(m2**(p/2-2))*np.outer(mvals, mvals)\n return DC0, DC1, DC2", "def l2_norm(a,b):\n\n if a.ndim == 1:\n return np.sqrt(np.sum((a-b)**2))\n else:\n return np.sqrt(np.sum((a-b)**2, axis=1))", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(x):\n result = 0.0\n for i in range(x.shape[0]):\n result += x[i] ** 2\n return np.sqrt(result)", "def l2_norm(x, y):\n x = tf.cast(x, dtype=tf.float32)\n y = tf.cast(y, dtype=tf.float32)\n x_sqr = tf.expand_dims(tf.reduce_sum(x * x, 1), -1) # [length, 1]\n y_sqr = tf.expand_dims(tf.reduce_sum(y * y, 1), -1) # [length, 1]\n xy = tf.matmul(x, tf.transpose(y)) # [length, length]\n dist_mat = x_sqr + tf.transpose(y_sqr) - 2 * xy\n return dist_mat", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def l2(vec):\n return np.linalg.norm(vec)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the inner product of the two patterns as a vector.
def inner_product(pattern_one, pattern_two):
    return np.sum(np.multiply(pattern_one, pattern_two))
[ "def inner_product(v1, v2):\r\n\r\n return v1.x * v2.x + v1.y * v2.y", "def vector_inner_product(self, a, b):\n\t\tassert(isinstance(a, types.ListType))\n\t\tassert(isinstance(b, types.ListType))\n\t\treturn reduce(operator.add, map(operator.mul, a, b))", "def _inner_product(self, z1, z2):\r\n return z1.real * z2.real + z1.imag * z2.imag", "def innerProduct(a,b):\n inner = 0 #initial varaiable return at end\n for i in range(len(a)):# run through each entry in the vector\n inner += a[i] * b[i] # multiply one entry from a to another b\n return inner # return the final result", "def vectorInnerProd(x, y):\n # check vector size\n assert len(x) == len(y), 'vectorInnerProd: 2 array do not have same size.'\n # compute inner product and return inner product\n return sum([xElem * yElem for xElem, yElem in zip(x, y)])", "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def _outer_product(cls, a, b):\n return np.reshape(np.outer(a, b), a.shape + b.shape)", "def vectorize_sumproducts(x, y):\n\n return np.sum(np.outer(x, y))", "def dot_product(v1, v2):\n return sum([float(i)*j for (i, j) in zip(v1, v2)])", "def inefficient_outer(x, y):\n result = np.zeros((len(x), len(y))) \n for i in range(len(x)):\n for j in range(len(y)):\n result[i, j] = x[i]*y[j]\n \n return result", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def dotproduct(x, y):\n return sum(vector_apply(lambda a, b: a * b, x, y))", "def inner_product(self, orb1, orb2):\n if isinstance(orb1, int):\n orb1 = self.molecular_orbitals[orb1]\n if isinstance(orb2, int):\n orb2 = self.molecular_orbitals[orb2] \n \n coeff1 = orb1.coefficients\n coeff2 = orb2.coefficients\n return numpy.dot(numpy.dot(coeff1, self.overlap()), coeff2)", "def vdot(a, b):\n return np.einsum('...i,...i->...', a, b)", "def productinterno(vector1,vector2):\r\n daga = adjuntamatriz(vector1)\r\n res = multiplicarmatriz(daga,vector2)\r\n if res[1] == 0:\r\n return res[0]\r\n return res", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def _inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):\n return super().inner_product(tangent_vec_a, tangent_vec_b,\n base_point=base_point)", "def productoVectorial(x1, y1, z1, x2, y2, z2):\n return y1*z2 - z1*y2, z1*x2 - x1*z2, x1*y2 - y1*x2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the pairwise inner product between each pattern in batch one and batch two. Notice that the pattern_stack_one variable represents the patterns along the zero dimension while the pattern_stack_two variable represents the patterns along dimension one in the final distance matrix.
def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):

    """
    Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the
    other half.
    """
    holder = np.zeros((pattern_num_one, pattern_num_two))
    for l in range(pattern_num_one):
        for m in range(pattern_num_two):
            holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))

    return holder
[ "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def _outer_product(cls, a, b):\n return np.reshape(np.outer(a, b), a.shape + b.shape)", "def generate_pattern_grid(words1, words2):\n # Convert word lists to integer arrays\n w1, w2 = (\n np.array([[ord(c) for c in w] for w in words], dtype=np.uint8)\n for words in (words1, words2)\n )\n\n if len(w1) == 0 or len(w2) == 0:\n return np.zeros((len(w1), len(w2)), dtype=np.uint8)\n\n # equality_grid[a, b, i, j] represents whether the ith letter\n # of words1[a] equals the jth letter of words2[b]\n equality_grid = np.zeros((len(w1), len(w2), 5, 5), dtype=bool)\n for i, j in it.product(range(5), range(5)):\n equality_grid[:, :, i, j] = np.equal.outer(w1[:, i], w2[:, j])\n\n patterns = np.zeros((len(w1), len(w2)), dtype=np.uint8)\n three_pows = (3**np.arange(5)).astype(np.uint8)\n for i, tp in enumerate(three_pows):\n # This accounts for yellow squares\n patterns[:, :] += tp * equality_grid[:, :, i, :].any(2)\n # This accounts for green squares\n patterns[:, :] += tp * equality_grid[:, :, i, i]\n\n return patterns", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def dim_mul(dims1, dims2):\n return (\n dims1[0] + dims2[0],\n dims1[1] + dims2[1],\n dims1[2] + dims2[2],\n dims1[3] + dims2[3],\n dims1[4] + dims2[4],\n dims1[5] + dims2[5],\n dims1[6] + dims2[6],\n )", "def axis_element_wise_multiplication(t1, t2, which_axis):\n # assert len(K.int_shape(t1)) == len(K.int_shape(t2)) + 1, \"rank(t1) should be rank(t2) + 1\"\n slices = tf.unstack(t1, axis=which_axis)\n # assert K.int_shape(slices[0]) == K.int_shape(t2), \"Slices of t1 were not the same shape as t2\"\n multiplies = []\n for s in slices:\n multiplies.append(t2 * s)\n return tf.stack(multiplies, axis=2)", "def inner_product(fdatagrid, fdatagrid2):\n if fdatagrid.dim_domain != 1:\n raise NotImplementedError(\"This method only works when the dimension \"\n \"of the domain of the FDatagrid object is \"\n \"one.\")\n # Checks\n if not np.array_equal(fdatagrid.sample_points,\n fdatagrid2.sample_points):\n raise ValueError(\"Sample points for both objects must be equal\")\n\n # Creates an empty matrix with the desired size to store the results.\n matrix = np.empty([fdatagrid.n_samples, fdatagrid2.n_samples])\n # Iterates over the different samples of both objects.\n for i in range(fdatagrid.n_samples):\n for j in range(fdatagrid2.n_samples):\n # Calculates the inner product using Simpson's rule.\n matrix[i, j] = (scipy.integrate.simps(\n fdatagrid.data_matrix[i, ..., 0] *\n fdatagrid2.data_matrix[j, ..., 0],\n x=fdatagrid.sample_points[0]\n ))\n return matrix", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def _inner_product(self, z1, z2):\r\n return z1.real * z2.real + z1.imag * z2.imag", "def mult2(M1, M2):\r\n 
return [[M1[i][j] * M2[i][j] for j in range(len(M1))] for i in range(len(M2))]", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def _matrix_vector_product_of_stacks(A, b):\n return np.einsum(\"ijk,ik->ij\", A, b)", "def _shift_product(data1, data2, shift, order=3, mask=None, prefilter=False):\n\n if mask is None:\n mask = numpy.ones(data1.shape)\n\n shifted = scipy.ndimage.shift(data2, [shift, 0], order=order, prefilter=prefilter)\n\n return (data1 * shifted * mask).sum()", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def dot_batch(x1, x2):\n\n batch = x1.shape[0]\n return torch.reshape(x1*x2, (batch, -1)).sum(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply np.exp(-matrix / two_sigma_square) elementwise.
def gaussian_dense(matrix, two_sigma_square):
    return np.exp(- matrix / two_sigma_square)
[ "def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))", "def kernel_sqExp(a,b, ls=1, sv=1):\n a = a.T/ls\n b = b.T/ls\n D, n = np.shape(a)\n d, m = np.shape(b)\n sqdist = np.tile((a**2).T, [1, m]) + np.tile(b*b, [n, 1]) - 2*np.dot(a.T,b)\n my_kernel = (sv**2) * np.exp(-0.5*sqdist)\n \n # written all out to illustrate (need to make sure a, b are in original dimensions):\n# my_kernel2 = np.zeros((n, m))\n# for i in range(n):\n# for j in range(m):\n# ai = a[i]\n# bj = b[j]\n# my_kernel2[i, j] = np.exp(-1/(2*ls**2) * (ai-bj)**2 )\n# my_kernel2 = my_kernel2 * (sv**2)\n \n return my_kernel", "def expms(A, eig=np.linalg.eigh):\r\n # TODO: check that this works reliably for low rank matrices\r\n # first: symmetrize A\r\n D, B = eig(A)\r\n return np.dot(B, (np.exp(D) * B).T)", "def gaussian2d(params, ellipse):\n return np.exp(-ellipse.R2)", "def exp(self):\n if not self.is_square:\n raise NonSquareMatrixError(\"Exponentiation is valid only for square matrices\")\n try:\n U, D = self.diagonalize()\n except MatrixError:\n raise NotImplementedError(\"Exponentiation is implemented only for diagonalizable matrices\")\n for i in xrange(0, D.rows):\n D[i, i] = C.exp(D[i, i])\n return U * D * U.inv()", "def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))", "def multiexp(X, *, wmin=None, wmax=None):\n return multisymapply(X, torch.exp, wmin=wmin, wmax=wmax)", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def test_perform_sigm_times_exp(self):\r\n x, y, z, t = tensor.vectors('x', 'y', 'z', 't')\r\n exp = tensor.exp\r\n\r\n def ok(expr1, expr2):\r\n trees = [parse_mul_tree(e) for e in (expr1, expr2)]\r\n perform_sigm_times_exp(trees[0])\r\n trees[0] = simplify_mul(trees[0])\r\n good = theano.gof.graph.is_same_graph(\r\n compute_mul(trees[0]),\r\n compute_mul(trees[1]))\r\n if not good:\r\n print trees[0]\r\n print trees[1]\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[0]))\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[1]))\r\n assert good\r\n ok(sigmoid(x) * exp(-x), sigmoid(-x))\r\n ok(-x * sigmoid(x) * (y * (-1 * z) * exp(-x)),\r\n -x * sigmoid(-x) * (y * (-1 * z)))\r\n ok(-sigmoid(-x) *\r\n (exp(y) * (-exp(-z) * 3 * -exp(x)) *\r\n (y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) *\r\n -sigmoid(x),\r\n sigmoid(x) *\r\n (-sigmoid(y) * (-sigmoid(-z) * 3) * (y * 2 * ((z + t) * exp(z)))) *\r\n -sigmoid(x))\r\n ok(exp(-x) * -exp(-x) * (-sigmoid(x) * -sigmoid(x)),\r\n -sigmoid(-x) * sigmoid(-x))\r\n ok(-exp(x) * -sigmoid(-x) * -exp(-x),\r\n -sigmoid(-x))", "def compute_mse(e):\n return 1 / 2 * np.mean(e**2)", "def compute_mse(e):\r\n return 1/2*np.mean(e**2)", "def noisify_exp(matrix, val):\n l = abs(np.max(matrix) - np.min(matrix))\n return matrix + np.random.exponential(l / val, matrix.shape)", "def exp2(t, a0, a1, b1, a2, b2):\n return a0 - a1 * np.exp(-b1 * t) - a2 * np.exp(-b2 * t)", "def exp(mat, target=None):\n\n if not target:\n target = mat\n\n err_code = _eigenmat.apply_exp(mat.p_mat, target.p_mat)\n if err_code:\n raise generate_exception(err_code)\n\n return target", "def _sigma_2(gam, eps):\n s0 = r0**2 * alpha / (3 * eps) / mec2_unit\n\n s1_1 = 16 * (1 - eps + eps**2) * np.log(gam / eps)\n s1_2 = -1 / eps**2 + 3 / eps - 4 - 4 * eps - 8 * eps**2\n s1_3 = -2 * (1 - 2 * eps) * np.log(1 - 2 * eps)\n s1_4 = 1 / (4 * eps**3) - 1 / (2 * eps**2) + 3 / eps - 
2 + 4 * eps\n s1 = s1_1 + s1_2 + s1_3 * s1_4\n\n s2_1 = 2 / eps\n s2_2 = (4 - 1 / eps + 1 / (4 * eps**2)) * np.log(2 * gam)\n s2_3 = -2 + 2 / eps - 5 / (8 * eps**2)\n s2 = s2_1 * (s2_2 + s2_3)\n\n return s0 * np.where(eps <= 0.5, s1, s2) * heaviside(gam - eps)", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def logdotexp(A, b):\n sqz = False\n b_bcast = np.expand_dims(b, 0)\n if b.ndim < 2:\n b_bcast = np.expand_dims(b_bcast, -1)\n sqz = True\n\n A_bcast = np.expand_dims(A, -1)\n\n res = logsumexp(A_bcast + b_bcast, axis=1)\n return res.squeeze() if sqz else res", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each row of the matrix, say the jth row, represents the distances between the jth point and the other data points. This function returns the indices of the points with the smallest distances with respect to each point represented by that row. By row, I mean the 0th dimension. Also notice that this function includes the target particle itself, i.e. the diagonal element along the matrix is set to 1.
def nearest_points_indexes_with_self(matrix, num_to_keep):
    # Set the diagonal to 1
    np.fill_diagonal(matrix, 1)
    # Get the position for the resulted values
    sort_arg = np.argsort(matrix, axis=1)
    return sort_arg[:, : num_to_keep]
[ "def _closest_points(x, y):\n P = x.shape[0]\n idx = np.zeros((P,), dtype=\"uint32\")\n for i in range(P):\n d = np.sum((y - np.expand_dims(x[i], 0))**2, axis=1) # K,\n idx[i] = np.argmin(d)\n return idx", "def find_min_distance():\n return np.argmin(d)", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def closest_to(ctx, row_i, distance, *args, **kwargs):\n\n mat = import_mat(ctx)\n dist = pairwise_distances(mat, mat[row_i].reshape(1, -1),\n metric=ctx.params['metric'])\n dist[row_i] = numpy.inf\n min_i = numpy.argmin(dist)\n if distance:\n print_pair(row_i, min_i, dist.item(min_i))\n else:\n print_pair(row_i, min_i)", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. \n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def mat_min(M):\n # take a matrix we pass in, and fill the diagonal with the matrix max. This is\n # so that we don't grab any values from the diag.\n np.fill_diagonal(M, float(\"inf\"))\n\n # figure out the indices of the cell with the lowest value.\n i, j = np.unravel_index(M.argmin(), M.shape)\n np.fill_diagonal(M, 0)\n return i, j", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def closest_row_indices(self, wv, num, metric):\n dist_array = np.ravel(cdist(self._embed_matrix, wv.reshape((1, -1)),\n metric=metric))\n sorted_indices = np.argsort(dist_array)\n\n return sorted_indices[:num]", "def k_nearest_neighbors(d_matrix, k):\n k_matrix = d_matrix.copy()\n rows = len(d_matrix)\n for i in range(rows):\n sorted_indexes = np.argsort(d_matrix[i])\n for index in sorted_indexes[k + 1:]:\n k_matrix[i][index] = 0\n # print(index)\n return k_matrix", "def find_index(array, points):\n\n # if looking for one point only...\n if points.ndim == 1:\n matrix_dist = np.linalg.norm(array - points[:, np.newaxis], axis=0)\n\n return matrix_dist.argmin()\n\n # if looking for multiple points...\n else:\n # create matrix of distances\n matrix_dist = np.linalg.norm(array[:, :, np.newaxis]\n - points[:, np.newaxis, :], axis=0)\n\n return matrix_dist.argmin(0)", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < 
min_distance:\n min_index, min_distance = i, distance\n return min_index", "def compute_minimum_distance(points):\n result = None\n for i in range(len(points)):\n for j in range(len(points)):\n if i == j:\n continue\n distance = compute_distance(points[i], points[j])\n if result == None:\n result = distance\n elif distance < result:\n result = distance\n return result", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def get_index_with_min_abs_score_difference(goals):\n min_diff = 100\n row_index = 0\n i = 0\n for row in goals:\n # print(\"Min.diff: \" + str(min_diff) + \" Row Index: \" + str(row_index) + \" i value: \" + str(i))\n if abs(row[5]- row[6]) < min_diff:\n min_diff = abs(row[5]-row[6])\n row_index = i\n i+=1\n\n else:\n i+=1\n return row_index", "def knn(vector, matrix, k=10):\n\n nearest_idx = []\n\n ### YOUR CODE\n score = []\n for index, row in enumerate(matrix):\n score.append((cos_sim(row, vector), index))\n ### END YOUR CODE\n sorted_vectors = sorted(score, key=lambda x:x[0], reverse=True)\n for i in range(k):\n nearest_idx.append(sorted_vectors[i][1])\n return nearest_idx", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def nearest_neighbors(similarity_matrix, idx, k):\n distances = []\n for x in range(len(similarity_matrix)):\n distances.append((x,similarity_matrix[idx][x]))\n distances.sort(key=operator.itemgetter(1), reverse=True)\n return [d[0] for d in distances[0:k]]", "def matrix_min(mat):\n # Currently, this function is unused, as its result is\n # the same as that of mat_min, and it is not always\n # faster. Left in for reference in case mat_min becomes\n # a bottleneck.\n\n # find the minimum from the upper triangular matrix\n # (not including the diagonal)\n upperTri = np.triu_indices(mat.shape[0], 1)\n minDex = mat[upperTri].argmin()\n\n # find the index in the big matrix. TODO: do so\n # with some algebra.\n triN = mat.shape[0] - 1\n row = 0\n while minDex >= triN:\n minDex -= triN\n triN -= 1\n row += 1\n col = mat.shape[0] - triN + minDex\n return row, col" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an identity key pair. Clients should only do this once, at install time. Returns the generated IdentityKeyPair.
def generateIdentityKeyPair():
    keyPair = Curve.generateKeyPair()
    publicKey = IdentityKey(keyPair.getPublicKey())
    serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \
                 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \
                 'edfbcd82129b14a88791ac81365c'
    serialized = binascii.unhexlify(serialized.encode())
    identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey())
    return identityKeyPair
    # return IdentityKeyPair(serialized=serialized)
[ "def __generate_asymetric_key_pair(self):\n secret_key = PrivateKey.generate()\n public_key = secret_key.public_key\n return AsymmetricKeyPair(secret_key, public_key)", "def create_keypair(self):\n self.keypair = rsa.generate_private_key(\n public_exponent=65537,\n key_size=4096,\n backend=default_backend()\n )\n self.save_keypair()", "def generate_key_pair():\r\n return RSA.generate(1024)", "def gen_ecies_keypair(self, gen_priv=True) -> keypairs.EncryptingKeypair:\n ecies_keypair = keypairs.EncryptingKeypair()\n if gen_priv:\n ecies_keypair.gen_privkey()\n return ecies_keypair", "def generate_public_key_pair(self):\n private_key = self.crypto._load_keyfile(self.username)\n if private_key:\n public_key = private_key.publickey()\n else:\n public_key, private_key = self.crypto._gen_asymmetric_keypair(2048)\n self.crypto._save_keyfile(self.username, private_key)\n self.pks.put_public_key(self.username, public_key)\n return private_key", "def generate_ssh_keypair():\n key = RSA.generate(2048)\n pubkey = key.publickey()\n return SSHKeyPair(pubkey.exportKey('OpenSSH').decode(), key.exportKey('PEM').decode())", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def generateRSAKeyPair(self, identityName, isKsk = False, keySize = 2048):\n return self._identityManager.generateRSAKeyPair(\n identityName, isKsk, keySize)", "def new_key_pair():\n return rsa.generate_private_key(\n public_exponent=65537,\n key_size=ASYMMETRIC_KEY_SIZE,\n backend=default_backend()\n )", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def __generateIK(self):\n\n self.__ik = self.__KeyPair.generate()", "def create_keypair(self, **attrs):\n return self._create(keypair.Keypair, **attrs)", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_signing_keypair(self, gen_priv=True) -> keypairs.SigningKeypair:\n ecdsa_keypair = keypairs.SigningKeypair()\n if gen_priv:\n ecdsa_keypair.gen_privkey()\n return ecdsa_keypair", "def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. 
For the purposes of this demo, I'm printing it to terminal.\"", "def generateRSAKeyPairAsDefault(\n self, identityName, isKsk = False, keySize = 2048):\n return self._identityManager.generateRSAKeyPairAsDefault(\n identityName, isKsk, keySize)", "def generate_key():\n with Session() as session:\n generate = {\n 'RSA': session.generate_rsa,\n 'AES': session.generate_aes,\n 'EC': session.generate_ec,\n 'DSA': session.generate_dsa\n }[request.vars.type]\n\n size_or_curve = request.vars.size if request.vars.type == 'EC' else int(request.vars.size)\n key_id = None\n try:\n key = generate(\n size_or_curve,\n label=request.vars.label,\n object_id=str(auth.user.id)\n )\n key_id = db.user_keys.insert(\n p11_label=request.vars.label,\n p11_type=request.vars.type,\n p11_size_or_curve=size_or_curve\n )\n except:\n print(\"Error, unable to generate key.\")\n\n session.list_all_objects()\n if key_id is not None:\n key = db(db.user_keys.id == key_id).select().first()\n return response.json(key)\n return response.json(dict(generate_key=None))", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def test_otoroshi_controllers_adminapi_pki_controller_gen_key_pair(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a registration ID. Clients should only do this once, at install time.
def generateRegistrationId():
    regId = KeyHelper.getRandomSequence()
    return regId
[ "def generate_id():\n return str(hex(int(time.time() * 10 ** 7)))[5:]", "def get_id(self) -> str:\n return self._register_id", "def gen_id() -> str:\n # id is set according to the current unix time\n return f'cli-reminder-{time.time()}'", "def gen_uuid():\n return str( uuid.uuid4() )", "def generateId( self ):\n # try to use the uuid module\n try:\n import uuid\n return uuid.uuid1()\n \n # otherwise, use the random module\n except ImportError:\n import random\n return random.randint(-1000000000000, 1000000000000)", "def userIDGen() :\n\treturn __randomString(8)", "def new_uid():\n return str(uuid.uuid1())[:30]", "def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()", "def id_generator(unique_id: str, sensor_type: str) -> str:\n return unique_id + sensor_type", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def CreateNewSessionID() -> str:\n return uuid.uuid4().hex[:6]", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def generate_key(self):\n return str(uuid4())", "def _generate_uuid() -> str:\n return str(uuid4())", "def generate_device_id():\n\n # TODO this is awful, makes me sad, but for now also makes demoing\n # easier We might want to look into an auto-configuration feature for\n # devices, such that ids are not input manually on devices\n\n _attempts = 0\n generated_id = ''\n while _attempts < 10 and len(generated_id) == 0:\n _attempts += 1\n new_id = create_id()\n if Device.query.filter_by(id=new_id).first() is None:\n LOGGER.debug(f\" Generated a new device id {new_id}\")\n return new_id\n\n LOGGER.error(f\" Failed to generate unique device_id\")\n raise HTTPRequestError(500, \"Failed to generate unique device_id\")", "def generate_product_number():\n return str(uuid.uuid4())", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)", "def new_id(self) -> str:\r\n id = str(uuid4())\r\n while id in self.next_id:\r\n id = str(uuid4())\r\n self.next_id.add(id)\r\n return id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a list of PreKeys. Clients should do this at install time, and subsequently any time the list of PreKeys stored on the server runs low. PreKey IDs are shorts, so they will eventually be repeated. Clients should store PreKeys in a circular buffer, so that they are repeated as infrequently as possible. start: the starting PreKey ID, inclusive. count: the number of PreKeys to generate. Returns the list of generated PreKeyRecords.
def generatePreKeys(start, count):
    results = []
    start -= 1
    for i in range(0, count):
        preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1
        results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))
    return results
[ "def _create_keys(self):\n num_clients = self.bft_network.config.num_clients\n if num_clients == 0:\n return []\n cur = bytearray(\"A\", 'utf-8')\n keys = [b\"A....................\"]\n for i in range(1, 2 * num_clients):\n end = cur[-1]\n if chr(end) == 'Z': # extend the key\n cur.append(self.alpha[0])\n else:\n cur[-1] = end + 1\n key = copy.deepcopy(cur)\n # Extend the key to be KV_LEN bytes\n key.extend([ord('.') for _ in range(self.KV_LEN - len(cur))])\n keys.append(bytes(key))\n\n return keys", "def generate_sub_keys(self):\n Keys = []\n for key_num in range(8):\n key = []\n if key_num % 4 == 0:\n for row in range(4):\n key.append(self.K[row][key_num:(key_num+4)])\n else:\n if key_num < 4:\n inter_key = copy.deepcopy(Keys[0])\n key = self.inter_exchange(key=inter_key,\n index=key_num)\n else:\n inter_key = copy.deepcopy(Keys[4])\n key = self.inter_exchange(key=inter_key,\n index=(key_num-4))\n LOG.info('Generated Key %r: %s', key_num, str(key))\n Keys.append(key)\n return Keys", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def import_to_reids(record_count=8):\n\ttry:\n\t\tconn = redis.Redis(host=HOST,port=PORT,password=PASSWD)\n\texcept:\n\t\tprint 'connection error'\n\t\tsys.exit(0)\n\n\t# add to a set,transaction with pipeline\n\ttrans = conn.pipeline(transaction=True) \n\tset_name = 'activation_code'\n\ttry:\n\t\tfor i in xrange(record_count):\n\t\t\tcode = activation_code_generaor()\n\t\t\ttrans.sadd(set_name,code)\n\t\ttrans.execute() #commit all commands at a time\n\t\t# show the code\n\t\tprint'success,number of keys in a set:',conn.scard(set_name)\n\texcept:\n\t\tprint 'error,rollback'\n\t\tsys.exit(0)", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "def gen_chunks(self,n):\n\t\t\n\t\t#print pri_key\n\t\tlength=len(self.pri_key)\n\t\t#print length\n\t\tchunk_size=length/n\n\t\t#print 
chunk_size\n\t\tparts=[]\n\t\tfor i in range(0,length,chunk_size):\n\t\t\t#print i\n\t\t\tparts.append(self.pri_key[i:min(i+chunk_size,length)])\n\n\t\t#print parts\n\t\treturn parts", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 8", "def generate_client_ids(count, path, account_id_encryption_key):\n\n # Open the file (or create it if non existing) and read number of lines\n with open(path + account_id_encryption_key, 'a+') as f:\n # Read the number of account id's for the specific key\n num_lines = 0\n f.seek(0)\n for num_lines, l in enumerate(f, 1): pass\n\n # Generate client_ids and append to file\n with open(path + account_id_encryption_key, 'a') as f: \n for x in range(num_lines+1, num_lines+count+1):\n # java -jar crypto.jar 0123456789abcdef 6\n result = subprocess.run(['java', '-jar', 'crypto.jar', account_id_encryption_key, str(x)], stdout=subprocess.PIPE)\n client_id = result.stdout.decode('utf-8')\n print('Adding line: %s %s' % (x, client_id))\n f.write(\"%s %s\" % (x, client_id))", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def generateKeys(n, k, r):\n keys = [0]\n while len(keys) < n:\n keys.append(keys[-1] + 1)\n while not uniqueSums(keys, k, r): keys[-1] += 1\n return keys", "def _generate_key_names(self, num_objects):\n objects = []\n for x in range(num_objects):\n obj_name = 'bolt-s3-perf' + str(x)\n objects.append(obj_name)\n return objects", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "def _get_primary_keys(self, table_name, num_rows):\n primary_key = self.metadata.get_primary_key(table_name)\n primary_key_values = None\n\n if primary_key:\n field = self.metadata.get_fields(table_name)[primary_key]\n\n generator = self.primary_key.get(table_name)\n\n if generator is None:\n if field['type'] != 'id':\n raise ValueError('Only columns with type `id` can be primary keys')\n\n subtype = field.get('subtype', 'integer')\n if subtype == 'integer':\n generator = itertools.count()\n remaining = np.inf\n elif subtype == 'string':\n regex = field.get('regex', r'^[a-zA-Z]+$')\n generator = exrex.generate(regex)\n remaining = exrex.count(regex)\n elif subtype == 'datetime':\n raise NotImplementedError('Datetime ids are not yet supported')\n else:\n raise ValueError('Only `integer` or `string` id columns are supported.')\n\n self.primary_key[table_name] = generator\n self.remaining_primary_key[table_name] = remaining\n\n else:\n remaining = self.remaining_primary_key[table_name]\n\n if remaining < num_rows:\n raise ValueError(\n 'Not enough unique values for primary key of table {}'\n ' to generate {} samples.'.format(table_name, num_rows)\n )\n\n self.remaining_primary_key[table_name] -= num_rows\n primary_key_values = pd.Series([x for i, x in zip(range(num_rows), generator)])\n\n return primary_key, primary_key_values", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def 
generate_keys(modeladmin, request, queryset):\n for player in queryset:\n player.key = id_gen(6)\n player.save()", "def generate_keys(key_size, amount):\n\n from random import choice\n\n chars = 'abcdefghijklmnopqrstuvwxyz'\n\n # We use a set to quickly detect duplicates\n result = set()\n\n while len(result) < amount:\n result.add(\n '__' + ''.join(choice(chars) for _ in range(key_size)) + '__'\n )\n\n # Notice that the returned value is a list, not a set\n return list(result)", "def get_next_keys(self):\n P_List = []\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n P_List.append(int(construct_pass(key, choice)))\n return P_List" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the given reader exists.
def exists(reader_name: str) -> bool:
    return plugins.exists(package_name=__name__, plugin_name=reader_name)
[ "def hasReader(self, reader):\n return self.readermanager.hasReader(reader)", "def find_reader(cls, name):\n names = [r.name for r in cls.reader_list]\n \n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' % name)\n return -1", "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def is_client_exist(self,clientid):\n try:\n return True if self.get_resource_metadata(clientid,resource_file=None) else False\n except exceptions.ResourceNotFound as ex:\n return False", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def has_resource(resource_name):", "def reading(self):\r\n return self._read_callback is not None or self._read_future is not None", "def test_read_before_connected(connection, reader, loop):\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "def exists(self):\n try:\n self.dataset\n return True\n except:\n return False", "def check_access(ident):\n resource = data_service.resource_load(uniq = ident)\n log.debug('Result from the database: %s'%resource)\n if resource is None:\n return False\n return True", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def is_read_locked(self):\n pass", "def is_exist(self,*args,resource_status=ResourceConstant.NORMAL_RESOURCE,resource_file=\"current\"):\n try:\n return True if self.get_resource_metadata(*args,resource_file=resource_file,resource_status=resource_status) else False\n except exceptions.ResourceNotFound as ex:\n return False", "def feed_exists(self):\n return os.path.exists(self._filename)", "def test_reader_given_filename(reader_test_file):\n reader = hdf5Reader(reader_test_file)\n assert reader.check_type(reader.target) is False\n assert reader.check_type(reader.base) is True\n assert reader._close_base_ is True\n reader.close()\n assert reader._close_base_ is False", "def exists(self):\r\n return self.filereferencedata != {}", "def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()", "def check_dataset(filename=None, url=None):\n try:\n with open(filename, 'r'):\n exists = True\n except IOError:\n exists = False\n\n if not exists:\n retreive_dataset(filename, url)", "def is_file_exists(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get one-line documentation for readers. If no readers are specified, documentation for all available readers is returned.
def short_docs(*readers: str) -> List[Tuple[str, str]]:
    if not readers:
        readers = names()

    return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers]
[ "def select_reader():\r\n readers_list = readers()\r\n\r\n if readers_list:\r\n return readers_list[0]", "def read_documentation(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n while lin[0] != ':':\r\n self.documentation.append(lin)\r\n lin = self.read_line(fid)\r\n return lin", "def readers ( self ) :\n return self.__readers", "def get_reader_funcs():\n return READERS", "def _fetch_reader(self):\n \n #get extension and build dict of available readers\n ext = os.path.splitext(self.path)[1].lstrip('.').upper()\n classes = dict(inspect.getmembers(readers, inspect.isclass))\n #attempt reader build\n try:\n return classes[ext](self.path)\n except KeyError:\n msg = 'Files of type {} not supported'\n raise TypeError(msg.format(ext))", "def documentation():\n doclist = ''\n\n doclist += f\"Json {version()}\"\n doclist += 'Help Documentation'\n doclist += '------------------'\n doclist += 'Docs will go here eventually.'\n doclist += ''\n doclist += ''\n doclist += ''\n doclist += ''\n\n docs = '\\n'.join(doclist)\n return docs", "def chain(*readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n\n for e in itertools.chain(*rs):\n yield e\n\n return reader", "def docs():", "def docLines(self):\n summary, description = self._getDocParts()\n if description:\n return summary + [\"\"] + description\n return summary", "def help_doc(self) -> None:\n self.indent = 4\n self.pad = 22\n\n if self.show_categories:\n utils._safe_print(self.parser_categories_text())\n return\n\n for arg in self.args:\n parser_name: str = self.parser_shortname(arg)\n\n if parser_name in parsers:\n p_info: ParserInfoType = parser_info(parser_name, documentation=True)\n compatible: str = ', '.join(p_info.get('compatible', ['unknown']))\n docs: str = p_info.get('documentation', 'No documentation available.')\n version: str = p_info.get('version', 'unknown')\n author: str = p_info.get('author', 'unknown')\n author_email: str = p_info.get('author_email', 'unknown')\n doc_text: str = \\\n f'{docs}\\n'\\\n f'Compatibility: {compatible}\\n\\n'\\\n f'Version {version} by {author} ({author_email})\\n'\n\n utils._safe_pager(doc_text)\n return\n\n utils._safe_print(self.helptext())\n return", "def summarize_rcdocs(modnames, headersep=\"=\", maxdflt=2000):\n nods = \"No docstring provided.\"\n template = \":{0!s}: {1!s}, *default:* {2}.\"\n docstrs = []\n tw = textwrap.TextWrapper(width=80, subsequent_indent=\" \"*4)\n for modname in modnames:\n moddoc = str(modname)\n moddoc += \"\\n\"+ headersep * len(moddoc) + \"\\n\"\n plugins = Plugins([modname], loaddeps=False) # get a lone plugin\n plugins.merge_rcs()\n rc = plugins.rc\n rcdocs = plugins.rcdocs\n for key in sorted(rc._dict.keys()):\n dflt = getattr(rc, key)\n rdflt = repr(dflt)\n rdflt = rdflt if len(rdflt) <= maxdflt else \"{0}.{1} instance\".format(\n dflt.__class__.__module__, dflt.__class__.__name__)\n rcdoc = template.format(key, rcdocs.get(key, nods), rdflt)\n moddoc += \"\\n\".join(tw.wrap(rcdoc)) + '\\n'\n docstrs.append(moddoc)\n return \"\\n\\n\\n\".join(docstrs)", "def documentation(self, level='first'):\n docs = (t.docstring for t in list(self.conjunction.terms) + [self]\n if t.docstring is not None)\n if level.lower() == 'first':\n doc = next(docs, None)\n elif level.lower() == 'top':\n doc = list(docs)\n return doc", "def get_documentation(path=\"\"):\n return \"\"\"<HTML><head><title>Python Minidoc for \"\"\"+path+\"\"\"</title></head>\n <body>\n \"\"\"+get_documentation_body(path)+\"\"\"\n </body></html>\"\"\"", "def getLooks(self):\n pass", 
"def setReader(self, *args):\n return _yarp.Contactable_setReader(self, *args)", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def __doc__(self, *args):\n\n\t\ttext = list(args)\n\t\toutput = \"\"\n\n\t\tfor i in range(len(text)):\n\t\t\toutput += text[i] + \"\\n\"\n\n\t\treturn output", "def getDoc(self):\r\n return self.__doc__", "def docs():\n\n if not os.path.exists('docs'):\n os.mkdir('docs')\n\n import csv\n rows = [['Name', 'Documentation', 'Types', 'Access']]\n for magicwordKey in spellbook.words:\n magicword = spellbook.words[magicwordKey]\n access = magicword.access if magicword.access else '0'\n doc = magicword.doc.strip() if magicword.doc else 'N\\A'\n rows.append([magicword.name, doc, magicword.types, access])\n\n with open('docs/clientdocs.csv', 'w') as docsFile:\n writer = csv.writer(docsFile, delimiter=',', lineterminator='\\n')\n writer.writerows(rows)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a bytes stream with a given reader. If the reader is not specified, an attempt to guess an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_stream(
    input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any
) -> Reader:
    if reader_name is None:
        reader_name = identify(input_stream)

    reader = plugins.call(
        package_name=__name__,
        plugin_name=reader_name,
        input_stream=input_stream,
        **reader_args,
    )
    reader.read()
    return reader
[ "def getreader(encoding):\r\n return lookup(encoding).streamreader", "def read(self, reader):\n self._verify(reader.read())\n return self", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def read_file(\n file_path: Union[str, pathlib.Path],\n reader_name: Optional[str] = None,\n **reader_args: Any,\n) -> Reader:\n with open(file_path, mode=\"rb\") as input_stream:\n return read_stream(input_stream, reader_name)", "def create_reader(stream_id: int, channel: Channel,\n ready_callback: Callable[[int], None] | None = None) -> BufferedReader:\n reader = RawChannelReader(stream_id, channel)\n if ready_callback:\n reader.add_ready_callback(ready_callback)\n return BufferedReader(reader)", "def getReader(self) -> ghidra.app.util.bin.BinaryReader:\n ...", "def _ReadStream(self, stream_name):\n file_object = self._OpenStream(stream_name)\n if not file_object:\n return b''\n\n try:\n data = file_object.read()\n finally:\n file_object.close()\n\n return data", "def _adapter_read(self):\n if self.__reading is None:\n self.__reading = self.__reader.read()\n # One more check. Not sure we need/want to be this strict.\n return_type_name = classname(self.__reading)\n if return_type_name != self._EXPECTED_CONTAINER:\n raise TypeError(\n f\"Expected Reader.read() to returned type \"\n f\"{return_type_name} but type {self._EXPECTED_CONTAINER} \"\n f\"was expected.\")\n return self.__reading", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def bread(fd # type: Union[bytes, str, pathlib.Path, pathlib.PurePath, TextIO, BinaryIO]\r\n ):\r\n # type: (...) 
-> Union[Tuple, List, OrderedDict, bool, int, str, bytes]\r\n return DEFAULT.read(fd)", "def read(cls, **kwargs):\n if kwargs.get('file'):\n try:\n return Bio.GenBank.read(open(kwargs.get('file')))\n except Exception as e:\n logging.warning(f\"Problem reading: {e}\")\n elif kwargs.get('string'):\n try:\n return Bio.GenBank.read(StringIO(kwargs.get('string')))\n except Exception as e:\n logging.warning(f\"Problem reading: {e}\")\n else:\n logging.debug(f\"{cls.__name__}.read() requires keyword argument: file or string\")", "def reader(filepath):\n filelow = filepath.lower()\n if filelow.endswith('.mp3'):\n return MP3Reader\n if filelow.endswith('.wav') or filelow.endswith('.aif') or filelow.endswith('.aiff') or filelow.endswith('.au'):\n return PCMReader\n return None", "def read_bytes(self, num_bytes, callback, streaming_callback=None):\r\n self._set_read_callback(callback)\r\n assert isinstance(num_bytes, numbers.Integral)\r\n self._read_bytes = num_bytes\r\n self._streaming_callback = stack_context.wrap(streaming_callback)\r\n self._try_inline_read()", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def _get_reader(self, data):\n try:\n if isinstance(data, bytes):\n data = data.decode()\n return csv.reader(\n io.StringIO(data),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n escapechar=self.escapechar\n )\n except (ValueError, csv.Error):\n return False", "def do_read(fp, decoder):\r\n # read header\r\n header = fp.read(RecordIO.RECORD_HEADER_SIZE)\r\n if len(header) == 0:\r\n log.debug(\"%s has no data (current offset = %d)\" % (fp.name, fp.tell()))\r\n # Reset EOF (appears to be only necessary on OS X)\r\n fp.seek(fp.tell())\r\n return None\r\n elif len(header) != RecordIO.RECORD_HEADER_SIZE:\r\n raise RecordIO.PrematureEndOfStream(\r\n \"Expected %d bytes in header, got %d\" % (RecordIO.RECORD_HEADER_SIZE, len(header)))\r\n blob_len = struct.unpack('>L', header)[0]\r\n if blob_len > RecordIO.MAXIMUM_RECORD_SIZE:\r\n raise RecordIO.RecordSizeExceeded(\"Record exceeds maximum allowable size\")\r\n\r\n # read frame\r\n read_blob = fp.read(blob_len)\r\n if len(read_blob) != blob_len:\r\n raise RecordIO.PrematureEndOfStream(\r\n 'Expected %d bytes in frame, got %d' % (blob_len, len(read_blob)))\r\n return decoder.decode(read_blob)", "def readerb(fhandler, serializer, **serializer_kwargs):\n serializer = _get_serializer(serializer, **serializer_kwargs)\n if serializer.binary_mode:\n fhandler = fhandler.buffer if not 'b' in fhandler.mode else fhandler\n elif 'b' in fhandler.mode:\n raise format_.BadIOMode(serializer, fhandler.mode)\n for element in serializer.load(fhandler):\n yield element", "def test_fast_reader():\n text = \"a b c\\n1 2 3\\n4 5 6\"\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format=\"fast_basic\", guess=False, comment=\"##\")\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": True, \"use_fast_converter\": True},\n )\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if 
os.name == \"nt\":\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": False, \"use_fast_converter\": True},\n )\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format=\"fast_basic\", guess=False, fast_reader={\"foo\": True})\n\n # Use the slow reader instead\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\", fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a file with a given reader. If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_file(
    file_path: Union[str, pathlib.Path],
    reader_name: Optional[str] = None,
    **reader_args: Any,
) -> Reader:
    with open(file_path, mode="rb") as input_stream:
        return read_stream(input_stream, reader_name)
[ "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def reader(filepath):\n filelow = filepath.lower()\n if filelow.endswith('.mp3'):\n return MP3Reader\n if filelow.endswith('.wav') or filelow.endswith('.aif') or filelow.endswith('.aiff') or filelow.endswith('.au'):\n return PCMReader\n return None", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def chooseReader(self, file_format, vtk_dataset_type=None):\n # Handle .ply files.\n if file_format == 'ply':\n return vtk.vtkPLYReader()\n # Handle .vtk files.\n if vtk_dataset_type == 'STRUCTURED_GRID':\n return vtk.vtkStructuredGridReader()\n elif vtk_dataset_type == 'POLYDATA':\n return vtk.vtkPolyDataReader()\n elif vtk_dataset_type == 'UNSTRUCTURED_GRID':\n return vtk.vtkUnstructuredGridReader()", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def read(self, reader):\n self._verify(reader.read())\n return self", "def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)", "def reader(self, descriptor):\n filename = descriptor.name\n logger.debug('Reader for {}'.format(filename))\n\n stat = os.stat(filename)\n reader = self.readers[filename]\n if stat.st_size == reader['previous_stat'].st_size:\n # Ignore calls if the size is the same\n # Should only happen when using the `select` event loop\n return\n elif stat.st_size < reader['previous_stat'].st_size:\n logger.info('Detected rotation on file {} - Sizes {} < {}'.format(\n filename, stat.st_size, reader['previous_stat'].st_size))\n\n self.remove_reader_callback_for_descriptor(descriptor)\n reader['descriptor'], msg = self.create_reader(filename, 1)\n else:\n msg = descriptor.read().decode()\n\n msg = msg.strip()\n self.send_message_to_handlers(msg, reader['handlers'])\n reader['previous_stat'] = stat", "def _fetch_reader(self):\n \n #get extension and build dict of available readers\n ext = os.path.splitext(self.path)[1].lstrip('.').upper()\n classes = dict(inspect.getmembers(readers, inspect.isclass))\n #attempt reader build\n try:\n return classes[ext](self.path)\n except KeyError:\n msg = 'Files of type {} not supported'\n raise TypeError(msg.format(ext))", "def read_file(path, default=None):\n try:\n with open(path) as f:\n return f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n return default\n raise", "def hasReader(self, reader):\n return self.readermanager.hasReader(reader)", "def read(self, location, **user_options):\n\n # Base the options off a copy to leave the Reader options uneffected.\n options = self.options.copy()\n options.update(user_options)\n\n # The directory option 
allows users to specify file locations relative\n # to a location other than the present working directory by joining the\n # location with the directory of their choice.\n if options.directory:\n location = os.path.join(options.directory, location)\n\n # When passed a directory as the location, the Reader recursively builds\n # a list of replays to return using the utils.get_files function. This\n # function respects the following arguments:\n # * depth: The maximum depth to traverse. Defaults to unlimited (-1)\n # * follow_symlinks: Boolean for following symlinks. Defaults to True\n # * exclude_dirs: A list of directory names to skip while recursing\n # * incldue_regex: A regular expression rule which all returned file\n # names must match. Defaults to None\n #\n replays, files = list(), utils.get_files(location, **options)\n\n # If no files are found, it could be for a variety of reasons\n # raise a NoMatchingFilesError to alert them to the situation\n if not files:\n raise exceptions.NoMatchingFilesError()\n\n for location in files:\n if options.verbose: print \"Reading: %s\" % location\n\n with open(location, 'rb') as replay_file:\n replays.append(self.make_replay(replay_file, **options))\n\n return replays", "def file_reader(parent_dir):\n # type: (str) -> Callable[[str], bytes]\n # Abstracted like this because we want to support reading from S3 in the future.\n\n def _read_file(uri):\n # type: (str) -> bytes\n \"\"\"Read the contents of the specified file.\n\n :param uri: File URI relative to ``parent_dir``\n :return: Binary file contents\n :rtype: bytes\n \"\"\"\n if not uri.startswith(\"file://\"):\n raise ValueError('Only file URIs are supported by \"file_reader\"')\n\n filename = uri[len(\"file://\") :]\n with open(os.path.join(parent_dir, filename), \"rb\") as source:\n return source.read()\n\n return _read_file", "def read_file(filepath):\n for extension, reader in _known_formats.items():\n if filepath.endswith(extension):\n return reader.read(filepath)\n\n # If filetype is not apparent from extension, attempt to detect\n reader = detect_filetype(filepath) \n return reader.read(filepath)", "def _read_file(self, options, datas):\n self.ensure_one()\n # guess mimetype from file content\n mimetype = guess_mimetype(datas)\n (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using guessed mimetype %s\", self.datas_fname or '<unknown>', self.id, mimetype)\n # try reading with user-provided mimetype\n (file_extension, handler, req) = FILE_TYPE_DICT.get(self.type, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using user-provided mimetype %s\", self.datas_fname or '<unknown>', self.id, self.type)\n # fallback on file extensions as mime types can be unreliable (e.g.\n # software setting incorrect mime types, or non-installed software\n # leading to browser not sending mime types)\n if self.datas_fname:\n p, ext = os.path.splitext(self.datas_fname)\n if ext in EXTENSIONS:\n try:\n return getattr(self, '_read_' + ext[1:])(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %s) using file extension\", self.datas_fname, self.id)\n if req:\n raise ImportError(_(\"Unable to load \\\"{extension}\\\" file: requires Python 
module \\\"{modname}\\\"\").format(extension=file_extension, modname=req))\n raise ValueError(_(\"Unsupported file format \\\"{}\\\", import only supports CSV, ODS, XLS and XLSX\").format(self.type))", "def create_reader(root: Union[str, pathlib.Path]) -> Reader:\n if isinstance(root, (tuple, list)):\n if len(root) == 1:\n return create_reader(root[0])\n return MultiReader(root)\n roots = list(glob.glob(str(root)))\n if len(roots) == 0:\n raise ValueError('Not found {root}')\n if len(roots) > 1:\n return create_reader(roots)\n root = pathlib.Path(roots[0])\n if root.is_dir():\n return FolderReader(root)\n if root.suffix == '.zip':\n return ZipReader(root)\n if root.suffix in ['.tar', '.tgz', '.gz']:\n return TarReader(root)\n raise ValueError(f'Not support {root}')", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test pointwise arithmetic with stencil offsets across two functions in indexed expression format
def test_indexed_stencil(self, expr, result):
    j, l = dimify('j l')
    a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base
    fa = a.function
    b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base
    fb = b.function

    eqn = eval(expr)
    Operator(eqn)(fa, fb)
    assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
[ "def test_unsubstituted_indexeds():\n grid = Grid(shape=(8, 8, 8))\n\n f = Function(name='f', grid=grid)\n p = TimeFunction(name='p', grid=grid)\n p1 = TimeFunction(name='p', grid=grid)\n\n f.data[:] = 0.12\n p.data[:] = 1.\n p1.data[:] = 1.\n\n eq = Eq(p.forward, sin(f)*p*f)\n\n op0 = Operator(eq)\n op1 = Operator(eq, opt=('advanced', {'linearize': True}))\n\n # NOTE: Eventually we compare the numerical output, but truly the most\n # import check is implicit to op1.apply, and it's the fact that op1\n # actually jit-compiles successfully, meaning that all substitutions\n # were performed correctly\n op0.apply(time_M=2)\n op1.apply(time_M=2, p=p1)\n\n assert np.allclose(p.data, p1.data, rtol=1e-7)", "def test_operator_same_size_global_offset_array(self):\n ndim = 5\n length = 4\n\n for op in STANDARD_OPERATORS | IN_PLACE_OPERATORS:\n for forward in [True, False]:\n (original, offset_array) = self.generate_data(ndim, length)\n operate_param = GlobalOffsetArray(np.ones(offset_array.shape, dtype=offset_array.dtype) * 10,\n global_offset=offset_array.global_offset)\n # itrue div requires floats when doing true division (can't do in place conversion to float)\n if op == operator.itruediv:\n original = original.astype(np.float64)\n offset_array = offset_array.astype(np.float64)\n operate_param = operate_param.astype(np.float64)\n\n # Make sure to compare expected results as a ndarray because operate_param is a GlobalOffsetArray.\n if forward:\n left_expected = original.view(np.ndarray)\n right_expected = operate_param.view(np.ndarray)\n left_offset = offset_array\n right_offset = operate_param\n else:\n # test operation commutativity\n left_expected = operate_param.view(np.ndarray)\n right_expected = original.view(np.ndarray)\n left_offset = operate_param\n right_offset = offset_array\n\n expected_result = op(left_expected, right_expected)\n actual_result = op(left_offset, right_offset)\n\n if op in STANDARD_OPERATORS:\n expected = expected_result\n actual = actual_result\n else:\n expected = original\n actual = offset_array\n\n # ensure global_offset is preserved\n self.assertEqual(offset_array.global_offset, actual.global_offset)\n\n # ensure actual results match that of a regular ndarray\n self.assertTrue(np.array_equal(expected, actual))\n\n # ensure the results that are returned are a copy of an array instead of a view just like ndarray\n expected[tuple([0] * ndim)] = 1337\n actual[actual.global_offset] = 1337\n\n # original arrays were not modified\n self.assertEqual(np.any(original == 1337), np.any(offset_array == 1337))\n\n # Try testing with the operate param with a different global_offset\n operate_param.global_offset = tuple([1337] * ndim)\n with self.assertRaises(ValueError):\n op(left_offset, right_offset)\n\n # Try testing with the operate param with a partially overlapping data\n operate_param.global_offset = tuple(floor(size/2) + offset for size, offset in\n zip(offset_array.shape, offset_array.global_offset))\n with self.assertRaises(ValueError):\n op(left_offset, right_offset)", "def test_range_index_operator_eq_index_1(self):\n def test_impl(index1, index2):\n return index1 == index2\n sdc_func = self.jit(test_impl)\n\n n = 11\n for index1, index2 in product(_generate_range_indexes_fixed(n), repeat=2):\n with self.subTest(index1=index1, index2=index2):\n result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray\n result_ref = test_impl(index1, index2)\n np.testing.assert_array_equal(result, result_ref)", "def test():\n Z = func.evaluate_circuit(F, 
e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_index_alignment(const):\n nt = 10\n grid = Grid(shape=(3, 5))\n time = grid.time_dim\n t = grid.stepping_dim\n x, y = grid.dimensions\n order_of_eqn = 1\n modulo_factor = order_of_eqn + 1\n last_time_step_u = nt - order_of_eqn\n u = TimeFunction(name='u', grid=grid, save=nt)\n # Increment one in the forward pass 0 -> 1 -> 2 -> 3\n fwd_eqn = Eq(u.indexed[time+1, x, y], u.indexed[time, x, y] + 1.*const)\n fwd_op = Operator(fwd_eqn)\n fwd_op(time=last_time_step_u, constant=1)\n last_time_step_v = (last_time_step_u) % modulo_factor\n # Last time step should be equal to the number of timesteps we ran\n assert(np.allclose(u.data[last_time_step_u, :, :], nt - order_of_eqn))\n v = TimeFunction(name='v', grid=grid, save=None)\n v.data[last_time_step_v, :, :] = u.data[last_time_step_u, :, :]\n # Decrement one in the reverse pass 3 -> 2 -> 1 -> 0\n adj_eqn = Eq(v.indexed[t-1, x, y], v.indexed[t, x, y] - 1.*const)\n adj_op = Operator(adj_eqn, time_axis=Backward)\n adj_op(t=(nt - order_of_eqn), constant=1)\n # Last time step should be back to 0\n assert(np.allclose(v.data[0, :, :], 0))\n\n # Reset v to run the backward again\n v.data[last_time_step_v, :, :] = u.data[last_time_step_u, :, :]\n prod = Function(name=\"prod\", grid=grid)\n # Multiply u and v and add them\n # = 3*3 + 2*2 + 1*1 + 0*0\n prod_eqn = Eq(prod, prod + u * v)\n comb_op = Operator([adj_eqn, prod_eqn], time_axis=Backward)\n comb_op(time=nt-order_of_eqn, constant=1)\n final_value = sum([n**2 for n in range(nt)])\n # Final value should be sum of squares of first nt natural numbers\n assert(np.allclose(prod.data, final_value))\n\n # Now reset to repeat all the above tests with checkpointing\n prod.data[:] = 0\n v.data[last_time_step_v, :, :] = u.data[last_time_step_u, :, :]\n # Checkpointed version doesn't require to save u\n u_nosave = TimeFunction(name='u_n', grid=grid)\n # change equations to use new symbols\n fwd_eqn_2 = Eq(u_nosave.indexed[t+1, x, y], u_nosave.indexed[t, x, y] + 1.*const)\n fwd_op_2 = Operator(fwd_eqn_2)\n cp = DevitoCheckpoint([u_nosave])\n wrap_fw = CheckpointOperator(fwd_op_2, time=nt, constant=1)\n\n prod_eqn_2 = Eq(prod, prod + u_nosave * v)\n comb_op_2 = Operator([adj_eqn, prod_eqn_2], time_axis=Backward)\n wrap_rev = CheckpointOperator(comb_op_2, time=nt-order_of_eqn, constant=1)\n wrp = Revolver(cp, wrap_fw, wrap_rev, None, nt-order_of_eqn)\n wrp.apply_forward()\n assert(np.allclose(u_nosave.data[last_time_step_v, :, :], nt - order_of_eqn))\n wrp.apply_reverse()\n assert(np.allclose(v.data[0, :, :], 0))\n assert(np.allclose(prod.data, final_value))", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = 
amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def test_coord_preceding_fs(self):", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def test_deriv_binary(func, preserve_result, a, b):\n utils.test_forward_array(func, (0,), preserve_result, a, b)", "def intersection(x, y, f, p):", "def test_pow_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = x**3 + y**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), 0.0)\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2), 9.0)\n assert equals(f.derivative_at((y, y), {x: 1.5, y:2.5}, order=2), 15.0)\n f = (x-y)**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n -6.0*(1.5-2.5))", "def check_indices(f, x, y):\n h = f.shape[0]\n w = f.shape[1]\n return (0 <= y < h) and (0 <= x < w)", "def structured_pow(x, y):\r\n # see decorator for function body\r", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = 
np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test pointwise arithmetic with stencil offsets across a single function with a buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result):
    i, j, l = dimify('i j l')
    a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base
    fa = a.function

    eqn = eval(expr)
    Operator(eqn)(fa)
    assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_operator_same_size_global_offset_array(self):\n ndim = 5\n length = 4\n\n for op in STANDARD_OPERATORS | IN_PLACE_OPERATORS:\n for forward in [True, False]:\n (original, offset_array) = self.generate_data(ndim, length)\n operate_param = GlobalOffsetArray(np.ones(offset_array.shape, dtype=offset_array.dtype) * 10,\n global_offset=offset_array.global_offset)\n # itrue div requires floats when doing true division (can't do in place conversion to float)\n if op == operator.itruediv:\n original = original.astype(np.float64)\n offset_array = offset_array.astype(np.float64)\n operate_param = operate_param.astype(np.float64)\n\n # Make sure to compare expected results as a ndarray because operate_param is a GlobalOffsetArray.\n if forward:\n left_expected = original.view(np.ndarray)\n right_expected = operate_param.view(np.ndarray)\n left_offset = offset_array\n right_offset = operate_param\n else:\n # test operation commutativity\n left_expected = operate_param.view(np.ndarray)\n right_expected = original.view(np.ndarray)\n left_offset = operate_param\n right_offset = offset_array\n\n expected_result = op(left_expected, right_expected)\n actual_result = op(left_offset, right_offset)\n\n if op in STANDARD_OPERATORS:\n expected = expected_result\n actual = actual_result\n else:\n expected = original\n actual = offset_array\n\n # ensure global_offset is preserved\n self.assertEqual(offset_array.global_offset, actual.global_offset)\n\n # ensure actual results match that of a regular ndarray\n self.assertTrue(np.array_equal(expected, actual))\n\n # ensure the results that are returned are a copy of an array instead of a view just like ndarray\n expected[tuple([0] * ndim)] = 1337\n actual[actual.global_offset] = 1337\n\n # original arrays were not modified\n self.assertEqual(np.any(original == 1337), np.any(offset_array == 1337))\n\n # Try testing with the operate param with a different global_offset\n operate_param.global_offset = tuple([1337] * ndim)\n with self.assertRaises(ValueError):\n op(left_offset, right_offset)\n\n # Try testing with the operate param with a partially overlapping data\n operate_param.global_offset = tuple(floor(size/2) + offset for size, offset in\n zip(offset_array.shape, offset_array.global_offset))\n with self.assertRaises(ValueError):\n op(left_offset, right_offset)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n 
A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 
2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n 
self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def compute_state_after_shock_hits_boundary():\n p20 = p1*scale\n f0 = func(p20)\n write(76,*) p20,f0\n p21 = p20*0.99\n f1 = func(p21)\n write(76,*) p21,f1\n ict = 0", "def test_4():\n np.random.seed(90)\n g = mt_obj.sog_gradient\n d = 100\n P = 50\n lambda_1 = 1\n lambda_2 = 10\n sigma_sq = 2\n store_x0, matrix_combined, store_c = (mt_obj.function_parameters_sog\n (P, d, lambda_1, lambda_2))\n func_args = P, sigma_sq, store_x0, matrix_combined, store_c\n tolerance = 0.00001\n bounds_set_x = (0, 1)\n point_index = 0\n x = np.random.uniform(*bounds_set_x, (d,))\n original_x = np.copy(x)\n set_x = 'random'\n sobol_points = None\n no_points = 1\n num_points = 1000\n with pytest.warns(RuntimeWarning):\n point_index, x, grad = (mt_alg.check_grad_starting_point\n (x, point_index, no_points, bounds_set_x,\n sobol_points, d, g, func_args, set_x,\n tolerance, num_points))\n assert(point_index > 0)\n assert(np.all(x != original_x))\n assert(no_points == 1)\n assert(np.all(g(x, *func_args) == grad))", "def test_unsubstituted_indexeds():\n grid = Grid(shape=(8, 8, 8))\n\n f = Function(name='f', grid=grid)\n p = TimeFunction(name='p', grid=grid)\n p1 = TimeFunction(name='p', grid=grid)\n\n f.data[:] = 0.12\n p.data[:] = 1.\n p1.data[:] = 1.\n\n eq = Eq(p.forward, sin(f)*p*f)\n\n op0 = Operator(eq)\n op1 = Operator(eq, opt=('advanced', {'linearize': True}))\n\n # NOTE: Eventually we compare the numerical output, but truly the most\n # import check is implicit to op1.apply, and it's the fact that op1\n # actually jit-compiles successfully, meaning that all substitutions\n # were performed correctly\n op0.apply(time_M=2)\n op1.apply(time_M=2, p=p1)\n\n assert np.allclose(p.data, p1.data, rtol=1e-7)", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n 
assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test022_2d_numerical_comparison_on_fprop_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_index(self, backend):\n\n variable_lin_op = linOpHelper((2, 2), type='variable', data=1)\n view = backend.process_constraint(variable_lin_op, backend.get_empty_view())\n\n # cast to numpy\n view_A = view.get_tensor_representation(0)\n view_A = sp.coo_matrix((view_A.data, (view_A.row, view_A.col)), shape=(4, 4)).toarray()\n assert np.all(view_A == np.eye(4))\n\n index_2d_lin_op = linOpHelper(data=[slice(0, 2, 1), slice(0, 1, 1)], args=[variable_lin_op])\n out_view = backend.index(index_2d_lin_op, view)\n A = out_view.get_tensor_representation(0)\n\n # cast to numpy\n A = sp.coo_matrix((A.data, (A.row, A.col)), shape=(2, 4)).toarray()\n expected = np.array(\n [[1, 0, 0, 0],\n [0, 1, 0, 0]]\n )\n assert np.all(A == expected)\n\n index_1d_lin_op = linOpHelper(data=[slice(0, 1, 1)], args=[variable_lin_op])\n out_view = backend.index(index_1d_lin_op, view)\n A = out_view.get_tensor_representation(0)\n\n # cast to numpy\n A = sp.coo_matrix((A.data, (A.row, A.col)), shape=(1, 4)).toarray()\n expected = np.array(\n [[1, 0, 0, 0]]\n )\n assert np.all(A == expected)\n\n # Note: view is edited in-place:\n assert out_view.get_tensor_representation(0) == view.get_tensor_representation(0)", "def test_generate_condition_function():\n masks = 4 # Always > 2\n vals = 15\n np_masks = np.random.randint(2, size=(masks, vals), dtype=bool)\n tf_masks = [tf.constant(i, dtype=tf.bool) for i in np_masks]\n # Generate the functions for and and or\n f_and = generate_condition_function(masks, \"and\")\n f_or = generate_condition_function(masks, \"or\")\n # Get the numpy and tf results\n np_ands = np.all(np_masks, axis=0)\n np_ors = np.any(np_masks, axis=0)\n tf_ands, idx_ands = f_and(*tf_masks)\n tf_ors, idx_ors = f_or(*tf_masks)\n # Check the values are the same\n util_check(np_ands, tf_ands, idx_ands)\n util_check(np_ors, tf_ors, idx_ors)\n # Check a combination\n f_comb = generate_condition_function(3, [\"and\", \"or\"])\n np_comb = np_masks[0] & np_masks[1] | np_masks[2]\n tf_comb, idx_comb = f_comb(*tf_masks[:3])\n util_check(np_comb, tf_comb, idx_comb)\n # Check failures\n with pytest.raises(ValueError):\n generate_condition_function(1, \"and\")\n with pytest.raises(ValueError):\n generate_condition_function(5, \"bad_condition\")\n with pytest.raises(ValueError):\n generate_condition_function(5, [\"or\", \"and\"])\n with pytest.raises(ValueError):\n generate_condition_function(3, [\"or\", \"bad_condition\"])", "def alpha_a_b(coord, N, silent=True):\n [x0, x1, y0, y1] = coord\n\n a = 0\n for zero in zeros[:N]:\n a += exp(-zero*y0)/abs(complex(0.5, zero))\n b = 0\n for zero in zeros[N:]:\n b += exp(-zero*y0)/abs(complex(0.5, zero))\n\n def F_north(x):\n return abs(F_N(complex(x, y1), N))\n def F_south(x):\n 
return abs(F_N(complex(x, y0), N))\n def F_east(y):\n return abs(F_N(complex(x1, y), N))\n def F_west(y):\n return abs(F_N(complex(x0, y), N))\n\n # def x_bounds(f_new, x_new, f_old, x_old):\n # return x0 <= x_new[0] <= x1\n\n # def y_bounds(f_new, x_new, f_old, x_old):\n # return y0 <= x_new[0] <= y1\n\n ns_kwargs = {\"bounds\":[(x0, x1)]}\n ew_kwargs = {\"bounds\":[(y0, y1)]}\n\n min_north = basinhopping(F_north, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)\n min_south = basinhopping(F_south, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)\n min_east = basinhopping(F_east, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)\n min_west = basinhopping(F_west, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)\n\n # if not silent:\n # print('min_north')\n # print(min_north)\n # print('min_south')\n # print(min_south)\n # print('min_east')\n # print(min_east)\n # print('min_west')\n # print(min_west)\n\n min_north = min_north.fun\n min_south = min_south.fun\n min_east = min_east.fun\n min_west = min_west.fun\n\n if not silent:\n print((min_north, min_south, min_east, min_west))\n\n alpha = min(min_north, min_south, min_east, min_west)\n\n return alpha, a, b", "def findifmat(ngrid, d, p, h, axis): \n\n # Initialize \n fact = np.arange(1, d+1).prod()/h**d # d!/h**d\n smax = (d + p - 1)//2 # maximum index of the stencil\n nmat = np.prod(ngrid) # total number of grid points\n npts = ngrid[axis] # size of grid along derivative axis\n igrid = np.arange(nmat).reshape(ngrid) # row-major indexing for grid points\n igrid = np.swapaxes(igrid, axis, 0) # move the derivative axis to 0\n fdmat = np.zeros((nmat, nmat))\n\n # [smax:-smax] points centeral findif with [-(d+p-1)/2, ..., (d+p-1)/2] stencil\n sten = range(-smax, smax+1)\n coef = findifcoef(sten, d)\n row = igrid[smax:npts-smax].flatten()\n for ss, cc in zip(sten, coef): \n col = igrid[smax+ss:npts-smax+ss].flatten() \n fdmat[row, col] = cc\n\n # [0:smax] points forward findif with [0, 1, ..., (d+p-1)] stencil\n sten = range(d + p)\n coef = findifcoef(sten, d)\n row = igrid[0:smax].flatten()\n for ss, cc in zip(sten, coef):\n col = igrid[ss:smax+ss].flatten()\n fdmat[row, col] = cc\n\n # [-smax:] points backward findif with [0, -1, ..., -(d+p-1)] stencil\n sten = range(0, -(d + p), -1)\n coef = findifcoef(sten, d)\n row = igrid[npts-smax:npts].flatten()\n for ss, cc in zip(sten, coef):\n col = igrid[npts-smax+ss:npts+ss].flatten()\n fdmat[row, col] = cc\n\n # return fact * fdmat\n return fdmat / h**d", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test pointwise arithmetic with stencil offsets and open loop boundaries in indexed expression format
def test_indexed_open_loops(self, expr, result):
    i, j, l = dimify('i j l')
    pushed = [d.size for d in [j, l]]
    j.size = None
    l.size = None
    a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed
    fa = a.function
    fa.data[0, :, :] = 2.

    eqn = eval(expr)
    Operator(eqn)(fa)
    assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
    j.size, l.size = pushed
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_range_index_operator_eq_index_1(self):\n def test_impl(index1, index2):\n return index1 == index2\n sdc_func = self.jit(test_impl)\n\n n = 11\n for index1, index2 in product(_generate_range_indexes_fixed(n), repeat=2):\n with self.subTest(index1=index1, index2=index2):\n result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray\n result_ref = test_impl(index1, index2)\n np.testing.assert_array_equal(result, result_ref)", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_operator_same_size_global_offset_array(self):\n ndim = 5\n length = 4\n\n for op in STANDARD_OPERATORS | IN_PLACE_OPERATORS:\n for forward in [True, False]:\n (original, offset_array) = self.generate_data(ndim, length)\n operate_param = GlobalOffsetArray(np.ones(offset_array.shape, dtype=offset_array.dtype) * 10,\n global_offset=offset_array.global_offset)\n # itrue div requires floats when doing true division (can't do in place conversion to float)\n if op == operator.itruediv:\n original = original.astype(np.float64)\n offset_array = offset_array.astype(np.float64)\n operate_param = operate_param.astype(np.float64)\n\n # Make sure to compare expected results as a ndarray because operate_param is a GlobalOffsetArray.\n if forward:\n left_expected = original.view(np.ndarray)\n right_expected = operate_param.view(np.ndarray)\n left_offset = offset_array\n right_offset = operate_param\n else:\n # test operation commutativity\n left_expected = operate_param.view(np.ndarray)\n right_expected = original.view(np.ndarray)\n left_offset = operate_param\n right_offset = offset_array\n\n expected_result = op(left_expected, right_expected)\n actual_result = op(left_offset, right_offset)\n\n if op in STANDARD_OPERATORS:\n expected = expected_result\n actual = actual_result\n else:\n expected = original\n actual = offset_array\n\n # ensure global_offset is preserved\n self.assertEqual(offset_array.global_offset, actual.global_offset)\n\n # ensure actual results match that of a regular ndarray\n self.assertTrue(np.array_equal(expected, actual))\n\n # ensure the results that are returned are a copy of an array instead of a view just like ndarray\n expected[tuple([0] * ndim)] = 1337\n actual[actual.global_offset] = 1337\n\n # original arrays were not modified\n self.assertEqual(np.any(original == 1337), np.any(offset_array == 1337))\n\n # Try testing with the operate param with a different global_offset\n operate_param.global_offset = tuple([1337] * ndim)\n with self.assertRaises(ValueError):\n op(left_offset, right_offset)\n\n # Try testing with the operate param with a partially overlapping data\n operate_param.global_offset = tuple(floor(size/2) + offset for size, offset in\n zip(offset_array.shape, offset_array.global_offset))\n with self.assertRaises(ValueError):\n 
op(left_offset, right_offset)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_current_density_boundaries(self):\n t, x_n, x_p = self.t, self.x_n_edge, self.x_p_edge\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[-1]), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[-1]), i_cell, decimal=3)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)", "def test_infer_for_idx1(self):\n input = \"\"\"\n Function: main\n Body:\n Var: i;\n For(i = foo(i), bool_of_string(\"True\"), 1) Do\n printStrLn(\"correct\");\n EndFor.\n EndBody.\n Function: foo\n Parameter: x\n Body:\n Var: y, z;\n y = (!z || False) && x;\n Return 100;\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInExpression(\n BinaryOp('&&', BinaryOp('||', UnaryOp('!', Id('z')), BooleanLiteral(False)), Id('x'))))\n self.assertTrue(TestChecker.test(input, expect, 472))", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def glint_mask(self): \n# glint_index = np.zeros([x_dim,y_dim])\n# for i in range(0,x_dim):\n# for j in range(0,y_dim):\n# ws = 2.0\n# sza_loc = sza[i,j]\n# vza_loc = vza[i,j]\n# raz_loc = raz2[i,j]\n# glint_index[i,j] = glint_calc.glint(sza_loc,vza_loc,raz_loc,ws)\n# ind_glint = glint_index > 0.0001\n# ind_water = np.logical_and(ind_water, ind_glint) \n return self", "def test_range_index_operator_eq_nparray(self):\n def test_impl(A, B):\n return A == B\n sdc_func = self.jit(test_impl)\n\n n = 11\n for A, B in product(\n _generate_range_indexes_fixed(n),\n map(lambda x: np.array(x), _generate_range_indexes_fixed(n))\n ):\n for swap_operands in (False, True):\n if swap_operands:\n A, B = B, A\n with self.subTest(left=A, right=B):\n result = np.asarray(sdc_func(A, B)) # FIXME_Numba#5157: remove np.asarray\n result_ref = test_impl(A, B)\n 
np.testing.assert_array_equal(result, result_ref)", "def test_unsubstituted_indexeds():\n grid = Grid(shape=(8, 8, 8))\n\n f = Function(name='f', grid=grid)\n p = TimeFunction(name='p', grid=grid)\n p1 = TimeFunction(name='p', grid=grid)\n\n f.data[:] = 0.12\n p.data[:] = 1.\n p1.data[:] = 1.\n\n eq = Eq(p.forward, sin(f)*p*f)\n\n op0 = Operator(eq)\n op1 = Operator(eq, opt=('advanced', {'linearize': True}))\n\n # NOTE: Eventually we compare the numerical output, but truly the most\n # import check is implicit to op1.apply, and it's the fact that op1\n # actually jit-compiles successfully, meaning that all substitutions\n # were performed correctly\n op0.apply(time_M=2)\n op1.apply(time_M=2, p=p1)\n\n assert np.allclose(p.data, p1.data, rtol=1e-7)", "def test_index(self, backend):\n\n variable_lin_op = linOpHelper((2, 2), type='variable', data=1)\n view = backend.process_constraint(variable_lin_op, backend.get_empty_view())\n\n # cast to numpy\n view_A = view.get_tensor_representation(0)\n view_A = sp.coo_matrix((view_A.data, (view_A.row, view_A.col)), shape=(4, 4)).toarray()\n assert np.all(view_A == np.eye(4))\n\n index_2d_lin_op = linOpHelper(data=[slice(0, 2, 1), slice(0, 1, 1)], args=[variable_lin_op])\n out_view = backend.index(index_2d_lin_op, view)\n A = out_view.get_tensor_representation(0)\n\n # cast to numpy\n A = sp.coo_matrix((A.data, (A.row, A.col)), shape=(2, 4)).toarray()\n expected = np.array(\n [[1, 0, 0, 0],\n [0, 1, 0, 0]]\n )\n assert np.all(A == expected)\n\n index_1d_lin_op = linOpHelper(data=[slice(0, 1, 1)], args=[variable_lin_op])\n out_view = backend.index(index_1d_lin_op, view)\n A = out_view.get_tensor_representation(0)\n\n # cast to numpy\n A = sp.coo_matrix((A.data, (A.row, A.col)), shape=(1, 4)).toarray()\n expected = np.array(\n [[1, 0, 0, 0]]\n )\n assert np.all(A == expected)\n\n # Note: view is edited in-place:\n assert out_view.get_tensor_representation(0) == view.get_tensor_representation(0)", "def test_expressions(self):\n self.build()\n lldbutil.run_to_name_breakpoint(self, \"main\")\n exprs = (\"argc + 1\", \"(void)argc\", \"(int)abs(argc)\")\n for expr in exprs:\n self._expect_cmd(f\"dwim-print {expr}\", \"expression\")", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def test_outside_plus_inside(self):\n for region, bounds in load_region_bounds_dict().items():\n lon_bounds, lat_bounds = bounds\n for key in ['data01', 'ds_shift_lon', 'ds_rev_both', 'ds_irr_both']:\n outside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='outside')['PRECL']\n inside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='inside')['PRECL']\n outside_plus_inside = (np.nan_to_num(outside_data.values) +\n np.nan_to_num(inside_data.values))\n diff_from_input = outside_plus_inside - data_dict[key]['PRECL'].values\n assert np.abs(diff_from_input).max() == 0", "def unityTestChangeOfCoordinates(map, ClosedLoopData):\n TestResult = 1\n for i in range(0, ClosedLoopData.x.shape[0]):\n xdat = ClosedLoopData.x\n xglobdat = ClosedLoopData.x_glob\n\n s, ey, _, _ = map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5], xglobdat[i, 3])\n v1 = np.array([s, ey])\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n # print v1, v2, np.dot(v1 - v2, v1 - v2), np.dot(v3 - v4, v3 - v4)\n\n if np.dot(v3 - v4, v3 - v4) > 0.00000001:\n TestResult = 0\n print 
\"ERROR\", v1, v2, v3, v4\n pdb.set_trace()\n v1 = np.array(map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5]))\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n print np.dot(v3 - v4, v3 - v4)\n pdb.set_trace()\n\n if TestResult == 1:\n print \"Change of coordinates test passed!\"", "def unityTestChangeOfCoordinates(map, ClosedLoopData):\n TestResult = 1\n for i in range(0, ClosedLoopData.x.shape[0]):\n xdat = ClosedLoopData.x\n xglobdat = ClosedLoopData.x_glob\n\n s, ey, _, _ = map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5], xglobdat[i, 3])\n v1 = np.array([s, ey])\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n # print v1, v2, np.dot(v1 - v2, v1 - v2), np.dot(v3 - v4, v3 - v4)\n\n if np.dot(v3 - v4, v3 - v4) > 0.00000001:\n TestResult = 0\n print (\"ERROR\", v1, v2, v3, v4)\n pdb.set_trace()\n v1 = np.array(map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5]))\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n print (np.dot(v3 - v4, v3 - v4))\n pdb.set_trace()\n\n if TestResult == 1:\n print (\"Change of coordinates test passed!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test calltime symbols overrides with other symbols
def test_override_symbol(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.) a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.) op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
[ "def _unrecognized_symbol_func(**kwargs):", "def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)", "def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = symbol_first\n assert layer.symbol == symbol_first\n\n layer = Points(data, symbol=symbol_first)\n assert layer.symbol == symbol_first\n\n layer.symbol = symbol_second\n assert layer.symbol == symbol_second", "def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass", "def test_magicGlobalsName(self):\r\n self.flakes('__name__')", "def test_custom_symbols():\n for _ in jabbar(range(100), symbols=\"yo\"):\n pass", "def test_register_standard_variables(self):\n pass", "def _otherSymbols(self, sym):\n if sym == 'a':\n return 'sflr'\n elif sym == 's':\n return 'aflr'\n elif sym == 'f':\n return 'aslr'\n elif sym == 'l':\n return 'asfr'\n elif sym == 'r':\n return 'asfl'\n else:\n print(\"_otherSymbols: should never get here!\")", "def enable_named_call():\n global _use_named_call\n _use_named_call = True", "def test_magicGlobalsBuiltins(self):\r\n self.flakes('__builtins__')", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def stubFunc( *args, **keywords ):\n maya.cmds.dynamicLoad( library )\n # call the real function which has replaced us\n return maya.cmds.__dict__[command]( *args, **keywords )", "def __init__(self):\n self.symbol_table_class = {}\n self.symbol_table_subroutine = {}", "def test_patch_pci_switch(self):\n pass", "def test_no_conflicting_symbols():\n full_set = set(default_unit_symbol_lut.keys())\n\n # go through all possible prefix combos\n for symbol in default_unit_symbol_lut.keys():\n if default_unit_symbol_lut[symbol][4]:\n keys = unit_prefixes.keys()\n else:\n keys = [symbol]\n for prefix in keys:\n new_symbol = f\"{prefix}{symbol}\"\n\n # test if we have seen this symbol\n assert new_symbol not in full_set, f\"Duplicate symbol: {new_symbol}\"\n\n full_set.add(new_symbol)", "def test_func7(self):\n pass", "def test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def test_misc_instruction(self):\n with self.assertRaises(InvalidOpcodeException):\n self.vm_opcode.instruction_lookup(0xF008)\n\n with self.assertRaises(InvalidOpcodeException):\n self.vm_opcode.instruction_lookup(0xF054)", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test calltime symbols overrides with numpy arrays
def test_override_array(self): i, j, k, l = dimify('i j k l') shape = tuple(d.size for d in (i, j, k, l)) a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = np.zeros(shape=shape, dtype=np.float32) + 3. a2 = np.zeros(shape=shape, dtype=np.float32) + 4. op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1, np.zeros(shape) + 6)) assert(np.allclose(a2, np.zeros(shape) + 7))
[ "def test__array__(parameter):\n par, _ = parameter\n assert np.array(par) == np.array(10.0)", "def test_TimeArray_repr():", "def test_format_signature_numpy():", "def test_TimeArray_convert_unit():", "def test_ndarray(self):\n self.check_roundtrip_ndarrays(np.zeros((5,3)))\n self.check_roundtrip_ndarrays(np.ones((1, 3), dtype=np.int64))", "def test_np_array_creation(self):\n self.assertTrue(np.array_equal(recursive_make_object(self.np_dict, self.class_dictionary), self.np_array))", "def test_numpy_crash(self):\n #a crash occurred somewhere in the past, and an\n # InferenceError instead of a crash was better, but now we even infer!\n try:\n import numpy # pylint: disable=unused-variable\n except ImportError:\n self.skipTest('test skipped: numpy is not available')\n builder = AstroidBuilder()\n data = \"\"\"\nfrom numpy import multiply\n\nmultiply(1, 2, 3)\n\"\"\"\n astroid = builder.string_build(data, __name__, __file__)\n callfunc = astroid.body[1].value.func\n inferred = callfunc.inferred()\n self.assertEqual(len(inferred), 1)", "def test_works_as_expected_within_numpy_array(self):\n try:\n import numpy as np\n except ImportError:\n np = None\n\n if np is None:\n pytest.skip('No numpy installed')\n\n # see PR #836\n dataset = Dataset()\n patient_name = 'MacDonald^George'\n dataset.PatientName = patient_name\n array_of_datasets = np.array([dataset])\n assert patient_name == array_of_datasets[0].PatientName", "def test_short_20_numpy_identify_frame_after_false_seek(self):\n config.OPTS.use_numpy = True\n self.identify_frame_after_false_seek()", "def test_expanding_np_arrays(self):\n self.assertEqual(expand_to_dict(self.np_array), self.np_dict)", "def test_array(self):\n obj = self.analyzer.get_object(['overload'])\n assert obj.params[0].type == 'string[]'", "def test_numpy(logging_mixin: Any) -> None:\n # Setup\n yml = yaml.yaml()\n\n test_array = np.array([1, 2, 3, 4, 5])\n\n # Perform a round-trip of dumping and loading\n result = dump_and_load_yaml(yml = yml, input_value = [test_array])\n\n assert np.allclose(test_array, result)", "def test_numpy():\n assert len(array_like()) < 5 # In case we extend at some point\n\n if len(array_like()) > 2: # Test numpy\n import numpy as np\n np_bounds = CategoricalBounds(np.array([\"spam\", \"eggs\"], dtype=object))\n np_copy = loads(dumps(np_bounds))\n assert np_copy == np_bounds\n\n if len(array_like()) > 3: # Test numpy\n import pandas as pd\n pd_bounds = CategoricalBounds(pd.Series([\"spam\", \"eggs\"]))\n pd_copy = loads(dumps(pd_bounds))\n assert pd_copy == pd_bounds", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def test_time(self):\n # here there's a minor difference because we have a ndarray with\n # dtype=np.int.\n a = TimeArray(20, \"h\")\n self.assertAlmostEqual(a.to(\"s\"), 3600 * 20)\n #Test left and right multiplication.\n self.assertEqual(str(a * 3), \"60 h\")\n self.assertEqual(str(3 * a), \"60 h\")", "def test05_array_overloads(self):\n\n import cppyy\n c_overload = cppyy.gbl.c_overload\n d_overload = cppyy.gbl.d_overload\n\n from array import array\n\n ai = array('i', [525252])\n assert c_overload().get_int(ai) == 525252\n assert d_overload().get_int(ai) == 525252\n\n ah = 
array('h', [25])\n assert c_overload().get_int(ah) == 25\n assert d_overload().get_int(ah) == 25", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def test_convert_array(self):\r\n data = np.array([1, 2, 3])\r\n res = data.numpy()\r\n\r\n assert np.shares_memory(res, data)\r\n assert np.all(res == data)\r\n assert isinstance(res, np.ndarray)\r\n assert not isinstance(res, np.tensor)", "def test_Sobol_G_raises_error_if_values_not_numpy_array():\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n evaluate(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the dimension sizes are being inferred correctly
def test_dimension_size_infer(self, nt=100): i, j, k = dimify('i j k') shape = tuple([d.size for d in [i, j, k]]) a = DenseData(name='a', shape=shape).indexed b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed eqn = Eq(b[time, x, y, z], a[x, y, z]) op = Operator(eqn) _, op_dim_sizes = op.arguments() assert(op_dim_sizes[time.name] == nt)
[ "def check_sizes(self):\n assert self.get_queries().shape == (self.nq, self.d)\n if self.nt > 0:\n xt = self.get_train(maxtrain=123)\n assert xt.shape == (123, self.d), \"shape=%s\" % (xt.shape, )\n assert self.get_database().shape == (self.nb, self.d)\n assert self.get_groundtruth(k=13).shape == (self.nq, 13)", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def check_sizes(input_tensor, input_name, expected):\n condition = [input_tensor.ndimension() == len(expected)]\n for i, size in enumerate(expected):\n if size.isdigit():\n condition.append(input_tensor.size(i) == int(size))\n assert(all(condition)),\\\n \"wrong size for {}, expected {}, got {}\".format(input_name, 'x'.join(expected), list(input_tensor.size()))", "def dimensions():", "def test_holdertable_output_size(common_minima, targetdim):\n assert fx.holdertable(common_minima).shape == targetdim", "def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def test_dim_len(array, shape):\n for axis, size in enumerate(shape):\n assert size == dim_len(array, axis)\n\n # Requesting the size for an axis higher than the array has dimensions should raise a TypeError\n with pytest.raises(TypeError):\n dim_len(array, len(shape))", "def test_batch_size_pack_size():", "def test_shape_data(self):\n self.assert_dim_type_supported({\"x\": \"uniform(0, 5, shape=(3, 2))\"})", "def test_model_size(model, dtype):\n input = torch.zeros((64, 1, 28, 28), dtype=dtype)\n scores = model(input)\n assert scores.size() == torch.Size([64, 2]), 'Model size is NOT good'", "def test_eggholder_output_size(common_minima2, targetdim):\n assert fx.eggholder([-10, 0] * common_minima2).shape == targetdim", "def test_dim(self):\n mat = self.rand_matrix(4, 4)\n self.assertEqual(Operator(mat).dim, (4, 4))\n self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))\n self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def _check_input_dimensions(self, obs_dim):\n if obs_dim == self.length_raw_array:\n return True\n else:\n raise ObservationArrayExpectedDimFail()", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), 
d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def test_n_dim(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n hist0 = hg.Count()\n\n assert hist0.n_dim == 0\n assert hist1.n_dim == 1\n assert hist2.n_dim == 2\n assert hist3.n_dim == 3", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def test_dimensions(nc, requirements):\n retVal = 0\n\n for req_dimension in requirements['dimensions']:\n if req_dimension not in nc.dimensions:\n print \"Dimension Missing: %s\" % (req_dimension)\n retVal += 1\n\n return retVal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test Box with photon shooting. Particularly the flux of the final image.
def test_box_shoot(): rng = galsim.BaseDeviate(1234) obj = galsim.Box(width=1.3, height=2.4, flux=1.e4) im = galsim.Image(100,100, scale=1) im.setCenter(0,0) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Box makePhot not equivalent to drawPhot" obj = galsim.Pixel(scale=9.3, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Pixel makePhot not equivalent to drawPhot" obj = galsim.TopHat(radius=4.7, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "TopHat makePhot not equivalent to drawPhot"
[ "def shooting(self):\n pass", "def test_photon_flux(self):\n self.assertIsInstance(self.el.photon_flux(), units.Quantity)", "def shoot(self):\n self.assertIsInstance(gun(3).shoot(), 2)\n self.assertIsInstance(gun(10).shoot(), 9)", "def test_photon_flux(self):\n self.assertEqual(self.el.photon_flux().unit, units.Unit(\"1/(s*cm2)\"))", "def fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest): \n \n # magnify = (rs + ri + li)/(ri)\n magnify = (rs+li+ri)/(ri+.5*li)\n\n xgv = x[:,0].flatten()\n ygv = y[0,:].flatten()\n xmin = np.min(xgv)\n xmax = np.max(xgv)\n ymin = np.min(ygv)\n ymax = np.max(ygv)\n\n dx = np.mean(np.diff(xgv))\n dy = np.mean(np.diff(ygv))\n x_edges = np.append(xgv - dx/2.0, xgv[-1] + dx/2.0)\n y_edges = np.append(ygv - dy/2.0, ygv[-1] + dy/2.0)\n \n # xd: N-element 1d Numpy Array, x positions of particles at deflection plane, in SI units\n # yd: N-element 1d Numpy Array, y positions of particles at deflection plane, in SI units\n xd = np.random.uniform(xmin, xmax, size=(Ntest,))\n yd = np.random.uniform(ymin, ymax, size=(Ntest,))\n \n xyd = np.stack((xd, yd), axis=1)\n #del xd, yd\n \n #wBx_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBx)\n #wBy_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBy)\n #wBxd = wBx_rbv.ev(xd, yd)\n #wByd = wBy_rbv.ev(xd, yd)\n \n wBxd = sp.interpolate.interpn((xgv, ygv), wBx, xyd, method='linear')\n wByd = sp.interpolate.interpn((xgv, ygv), wBy, xyd, method='linear')\n\n xfd = xd + rs/(magnify*v) * wBxd\n yfd = yd + rs/(magnify*v) * wByd\n \n print(\"Histogramming reference...\")\n flux_ref, _, _ = np.histogram2d(xd, yd, bins=[x_edges, y_edges])\n flux_ref = flux_ref * N/Ntest\n \n print(\"Histogramming signal...\")\n flux_image, _, _ = np.histogram2d(xfd, yfd, bins=[x_edges, y_edges])\n flux_image = flux_image * N/Ntest\n\n print('DONE')\n \n return(flux_image, flux_ref)", "def shoot(self):\n if self.stuck_on[0]:\n self.stick(None)\n self.change_y = BALL_SPEED", "def test_sersic_shoot():\n rng = galsim.BaseDeviate(1234)\n obj = galsim.Sersic(n=1.5, half_light_radius=3.5, flux=1.e4)\n im = galsim.Image(100,100, scale=1)\n im.setCenter(0,0)\n added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())\n print('obj.flux = ',obj.flux)\n print('added_flux = ',added_flux)\n print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())\n print('image flux = ',im.array.sum())\n assert np.isclose(added_flux, obj.flux)\n assert np.isclose(im.array.sum(), obj.flux)\n photons2 = obj.makePhot(poisson_flux=False, rng=rng)\n assert photons2 == photons, \"Sersic makePhot not equivalent to drawPhot\"\n\n obj = galsim.DeVaucouleurs(half_light_radius=3.5, flux=1.e4)\n # Need a larger image for devauc wings\n im = galsim.Image(1000,1000, scale=1)\n im.setCenter(0,0)\n added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())\n print('obj.flux = ',obj.flux)\n print('added_flux = ',added_flux)\n print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())\n print('image flux = ',im.array.sum())\n assert np.isclose(added_flux, obj.flux)\n assert np.isclose(im.array.sum(), obj.flux)\n photons2 = obj.makePhot(poisson_flux=False, rng=rng)\n assert photons2 == photons, \"Sersic makePhot not equivalent to drawPhot\"\n\n # Can do up to around n=6 with this image if hlr is smaller.\n obj = galsim.Sersic(half_light_radius=0.9, n=6.2, flux=1.e4)\n added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())\n print('obj.flux = ',obj.flux)\n print('added_flux = ',added_flux)\n print('photon 
fluxes = ',photons.flux.min(),'..',photons.flux.max())\n print('image flux = ',im.array.sum())\n assert np.isclose(added_flux, obj.flux)\n assert np.isclose(im.array.sum(), obj.flux)\n photons2 = obj.makePhot(poisson_flux=False, rng=rng)\n assert photons2 == photons, \"Sersic makePhot not equivalent to drawPhot\"", "def shoot(self):\n if self.gun_interface:\n self.gun_interface.prepare_fire()", "def test_conserve_flux(self):\n myia = self.ia\n imagename = \"tres1x.im\"\n myia.fromshape(imagename, [100, 100])\n csys = myia.coordsys()\n csys.setunits([\"arcsec\", \"arcsec\"])\n csys.setincrement([-1, 1])\n myia.setcoordsys(csys.torecord())\n myia.setrestoringbeam(major=\"6arcsec\", minor=\"3arcsec\", pa=\"0deg\")\n shape = myia.shape()\n values = make_gauss2d(shape, 3.0, 6.0)\n #expected = make_gauss2d(shape, 5.0, 10.0)\n myia.putchunk(values)\n for unit in (\"K\", \"cm-2\"):\n myia.setbrightnessunit(unit)\n zz = myia.fitcomponents()\n mycl = cltool()\n mycl.fromrecord(zz['results'])\n expected = mycl.getfluxvalue(0)\n gg = iatool()\n outfile = \"gxg_\" + unit + \".im\"\n imsmooth(\n imagename=imagename, targetres=True, major=\"10arcsec\", minor=\"5arcsec\",\n pa=\"0deg\", outfile=outfile\n )\n gg.open(outfile)\n zz = gg.fitcomponents()\n gg.done()\n mycl.fromrecord(zz['results'])\n got = mycl.getfluxvalue(0)\n self.assertTrue(abs(got[0]/expected[0] - 1) < 3e-7, \"Failed testing unit \" + unit)\n mycl.done()\n myia.done()", "def test_photon_energy_flux(self):\n self.assertIsInstance(self.el.photon_energy_flux(), units.Quantity)", "def _draw_shoot_state(self, state: CannonState) -> None:\n size, tux_img = self.__imageLoader.get_tux_small()\n text_surface = self.__big_font.render('Shoot: ', False, (0, 0, 0))\n\n self.__screen.blit(text_surface, (600, 10))\n\n if state == 'SingleShoot':\n self.__screen.blit(tux_img, (670, 10))\n\n if state == 'DoubleShoot':\n self.__screen.blit(tux_img, (670, 10))\n self.__screen.blit(tux_img, (690, 10))", "def moves_boxs(pokemon):\n pass", "def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass", "def shoot(self, (x, y)):\n return self.__wt.shoot(self, (x, y))", "def test_shoot(self):\n world = self.server.get_world()\n world.setInputShootPlayer = MagicMock(name='setInputShootPlayer')\n self.server.run_command('1 SHOOT')\n world.setInputShootPlayer.assert_called_once_with(1)", "def process_pucks(self, robot, sensor_dump, visualize=False):\n\n image = sensor_dump.lower_image\n\n # We'll use the vector 'normal' below which is the guidance vector \n # (vector to the landmark) rotated by pi/2.\n normal = (-self.guidance_vec[1], self.guidance_vec[0])\n normal_length = sqrt(normal[0]**2 + normal[1]**2)\n\n # Now we look at all puck pixels and calculate the perpendicular dist\n # from each to the landmark. 
We set the target to the puck with the \n # largest d (i.e. the largest-closest distance).\n new_target_selected = False\n largest_d = 0\n largest_dp = None\n for i in range(image.n_rows):\n if image.masks[i] & self.puck_mask != 0:\n (xr, yr) = image.calib_array[i,2], image.calib_array[i,3]\n pt = image.calib_array[i,4]\n pr = image.calib_array[i,5]\n puck_vec_length = sqrt(xr**2 + yr**2)\n\n d = sqrt(distance_squared((xr, yr), self.guidance_vec))\n\n dot_product = xr * normal[0] + yr * normal[1]\n norm_dot_product = dot_product / (normal_length * puck_vec_length)\n #angle_between_normal_and_puck = acos(dot_product / (normal_length * puck_vec_length))\n\n if visualize:\n draw_segment_wrt_robot(robot, (xr, yr),\n self.guidance_vec, color=(255,0,255), width=3)\n \n condition_okay = True\n if (self.puck_condition == \"SIMPLE\"):\n pass\n\n #elif (self.puck_condition == \"DOT\" and fabs(angle_between_normal_and_puck) < pi/4):\n # condition_okay = False\n elif (self.puck_condition == \"DOT\" and norm_dot_product < 0.75):\n condition_okay = False\n\n elif (self.puck_condition == \"BETA\"\n and (pt - self.guidance_angle) >= self.outer_exclude_angle):\n condition_okay = False\n\n elif (self.puck_condition == \"BOTH\"\n and ((pt - self.guidance_angle) <= self.inner_exclude_angle)\n or ((pt - self.guidance_angle) >= self.outer_exclude_angle)):\n condition_okay = False\n\n if (condition_okay\n and d > self.puck_dist_threshold\n and d > largest_d):\n largest_d = d\n #largest_dp = angle_between_normal_and_puck\n largest_dp = dot_product\n self.target_pos = (xr, yr)\n new_target_selected = True\n # (pr, pt + self.target_angle_bias)\n \n if new_target_selected:\n print(\"largest_dp: {}\".format(largest_dp), end='')\n\n if not new_target_selected:\n self.target_pos = None", "def test_shots_passed(self):\n prog = sf.TDMProgram(2)\n eng = sf.Engine(\"gaussian\")\n\n with prog.context([1,2], [3,4]) as (p, q):\n ops.Sgate(p[0]) | q[0]\n ops.MeasureHomodyne(p[1]) | q[0]\n\n prog.run_options = {\"shots\": 5}\n results = eng.run(prog, shots=2)\n assert results.samples.shape[0] == 2\n assert prog.run_options[\"shots\"] == 5", "def boltshoot(self):\n if self.input.is_key_down('spacebar'):\n self.getWave().boltInit()", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decide whether to enter hotspot mode or wifi mode and then do so
def set_wifi_mode(args): pass """+ try: if args['mode'] == 'hotspot': logger.info('will enter hotspot mode') #TODO - Need to capture the line that contains interface [some lan id] and uncomment it. change_file_line(path.join('/etc', 'dhcpcd.conf'), interface_l1_res, 'interface {}\n'.format() return True if args['silent'] else 'Ok' if args['mode'] == 'wi-fi': logger.info('will enter wi-fi mode') return True if args['silent'] else 'Ok' else: logger.error('Unknown wi-fi mode: {}'.format(args['mode'])) return False if args['silent'] else 'ERROR' except: logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1])) return False if args['silent'] else 'ERROR' """
[ "def wifi(self):\n if not hasattr(self, \"_wifi\"):\n cmd = [\"networksetup\", \"-getairportpower\", \"en0\"]\n output = subprocess.check_output(cmd)\n self._wifi = \"On\" in str(output)\n return self._wifi", "def test_usb_tethering_coexist_wifi_hotspot(self):\n self.log.info('Enable wifi hotspot.')\n self.enable_wifi_hotspot()\n self.enable_usb_tethering()\n self.log.info('Ping test with hotspot enable.')\n self.can_ping_through_usb_interface()\n self.log.info('Disable wifi hotspot.')\n self.dut.droid.connectivityStopTethering(tel_defines.TETHERING_WIFI)\n self.log.info('Ping test with hotspot disable.')\n self.can_ping_through_usb_interface()", "def _is_device_wifi(self, device):\n device_info = device[1]\n real, device_type = device_info['Real'], device_info['DeviceType']\n return bool(real) and device_type == 2", "def _cycle_wifi(mode=None):\n call(['ifdown', settings.WIFI_INTERFACE])\n if mode is not None:\n call(['iwconfig', settings.WIFI_INTERFACE, 'mode', mode])\n call(['ifup', settings.WIFI_INTERFACE])", "def configure_wifi_and_airplane_mode(wifi_enabled=False):\n # Airplane mode should be disabled in all cases. This can get inadvertently\n # turned on via gestures.\n adb.disable_airplane_mode()\n\n # Need to disable wifi before changing configuration.\n adb.disable_wifi()\n\n # Check if wifi needs to be enabled. If not, then no need to modify the\n # supplicant file.\n wifi_enabled = wifi_enabled or environment.get_value('WIFI', True)\n if not wifi_enabled:\n # No more work to do, we already disabled it at start.\n return\n\n if adb.is_gce():\n wifi_ssid = 'VirtWifi'\n wifi_password = ''\n else:\n config = db_config.get()\n if not config.wifi_ssid:\n logs.log('No wifi ssid is set, skipping wifi config.')\n return\n wifi_ssid = config.wifi_ssid\n wifi_password = config.wifi_password or ''\n\n adb.enable_wifi()\n\n # Wait 2 seconds to allow the wifi to be enabled.\n time.sleep(2)\n\n wifi_util_apk_path = os.path.join(\n environment.get_platform_resources_directory(), 'wifi_util.apk')\n if not adb.is_package_installed(WIFI_UTIL_PACKAGE_NAME):\n adb.install_package(wifi_util_apk_path)\n\n connect_wifi_command = (\n 'am instrument -e method connectToNetwork -e ssid {ssid} ')\n if wifi_password:\n connect_wifi_command += '-e psk {password} '\n connect_wifi_command += '-w {call_path}'\n\n output = adb.run_adb_shell_command(\n connect_wifi_command.format(\n ssid=quote(wifi_ssid),\n password=quote(wifi_password),\n call_path=WIFI_UTIL_CALL_PATH))\n if 'result=true' not in output:\n logs.log_error('Failed to connect to wifi.', output=output)", "def power_idle_tethering_test(self):\n attrs = ['screen_status', 'band', 'client_connect', 'wifi_sharing']\n indices = [2, 4, 7, 9]\n self.decode_test_configs(attrs, indices)\n\n client_connect = self.test_configs.client_connect == 'true'\n\n # Setup Hotspot with desired config and connect (or not) client\n self.setup_hotspot(client_connect)\n\n # Measure power and validate\n self.measure_power_and_validate(False)", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n 
self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def wireless_ap(color, speed, action):\r\n\r\n if action:\r\n os.system('cp /etc/dhcpcd.conf.activate /etc/dhcpcd.conf') # Set static ip for wlan0\r\n os.system('systemctl enable hostapd') # Enable hostapd at boot\r\n while globals.direction != 'middle':\r\n show_message_break(\"AP started! System will reboot now\", color, speed)\r\n get_joystick()\r\n passed()\r\n os.system('reboot')\r\n else:\r\n os.system('systemctl disable hostapd') # Disable hostapd at boot\r\n os.system('cp /etc/dhcpcd.conf.desactivate /etc/dhcpcd.conf') # Remove static ip\r\n while globals.direction != 'middle':\r\n show_message_break(\"AP stopped! System will reboot now\", color, speed)\r\n get_joystick()\r\n passed()\r\n os.system('reboot')", "def setup_hotspot(self, connect_client=False):\n try:\n if self.test_configs.band == self.PARAM_2G_BAND:\n wifi_band_id = WIFI_CONFIG_APBAND_2G\n elif self.test_configs.band == self.PARAM_5G_BAND:\n wifi_band_id = WIFI_CONFIG_APBAND_5G\n else:\n raise ValueError()\n except ValueError:\n self.log.error(\n \"The test name has to include parameter {} followed by \"\n \"either {} or {}.\".format(self.PARAM_WIFI_BAND,\n self.PARAM_2G_BAND,\n self.PARAM_5G_BAND))\n return False\n\n # Turn WiFi ON for DUT (hotspot) and connect to AP (WiFiSharing)\n # Hotspot needs airplane mode OFF\n self.dut.droid.connectivityToggleAirplaneMode(False)\n time.sleep(2)\n if self.test_configs.wifi_sharing == 'OFF':\n wutils.wifi_toggle_state(self.dut, True)\n time.sleep(2)\n else:\n self.setup_ap_connection(\n self.main_network[self.test_configs.wifi_sharing])\n\n # Setup tethering on dut\n wutils.start_wifi_tethering(\n self.dut, self.network[wutils.WifiEnums.SSID_KEY],\n self.network[wutils.WifiEnums.PWD_KEY], wifi_band_id)\n\n # Connect client device to Hotspot\n if connect_client:\n wutils.wifi_connect(\n self.android_devices[1],\n self.network,\n check_connectivity=False)", "def connect_type(word_list):\n if 'wlan0' in word_list or 'wlan1' in word_list:\n con_type = 'wifi'\n elif 'eth0' in word_list:\n con_type = 'ethernet'\n else:\n con_type = 'current'", "def updateDriveMode(self):\n changedMode = False\n def changeModeIfNeeded(mode):\n if self.steeringMode != mode:\n self.steeringMode = mode\n return True\n return False\n #\n if self.joyState['buttonCircleStrafe']:\n changedMode = changeModeIfNeeded(DriveMode.CIRCLESTRAFE)\n if changedMode:\n self.circleStrafeDistance = TYCHO_DEFAULT_CIRCLE_STRAFE_DISTANCE\n elif self.joyState['buttonFrontSteer']:\n changedMode = changeModeIfNeeded(DriveMode.FRONT_STEER)\n elif self.joyState['buttonTurnInPlace']:\n changedMode = changeModeIfNeeded(DriveMode.INPLACE)\n elif self.joyState['buttonStrafe']:\n changedMode = changeModeIfNeeded(DriveMode.STRAFE)\n elif self.joyState['buttonNormal']:\n changedMode = changeModeIfNeeded(DriveMode.NORMAL)\n elif self.joyState['buttonStop']:\n changedMode = changeModeIfNeeded(DriveMode.STOP)\n else:\n changedMode = changeModeIfNeeded(DriveMode.NORMAL)\n #\n if changedMode:\n print(\"Set driving mode to %s\"%self.steeringMode.name)\n #", "def SetMode(self, mode):\n if not self.iface: return False\n mode = _sanitize_string(mode)\n if mode.lower() == 'master':\n mode = 'managed'\n cmd = 'iwconfig %s mode %s' % (self.iface, mode)\n if self.verbose: print cmd\n misc.Run(cmd)", "def mode_wifi(device_id = None):\n devices = 
H.adb_devices()\n if len(devices.usb) == 0:\n return\n if device_id is not None:\n assert device_id in devices.usb\n else:\n device_id = devices.usb[0]\n \n ip = H.device_ip(device_id)\n port = 5555\n Popen([\"adb\", \"tcpip\", \"%d\" % (port,)]).wait()\n time.sleep(4)\n Popen([\"adb\", \"connect\", \"%s\" % (ip,)]).wait()\n print \"Disconnect USB cable...\"\n H.wait_usb_disconnection()\n print H.adb_devices()", "def OnTheaterMode(self, TheaterMode=sentinel):", "def enable_wifi():\r\n if int(Device.platform_version[0]) > 6:\r\n # command = 'adb shell am broadcast -a io.appium.settings.wifi --es setstatus enable'\r\n command = 'adb shell svc wifi enable'\r\n else:\r\n command = 'adb shell am start -n io.appium.settings/.Settings -e wifi on'\r\n\r\n Android.send_adb_command(command, 10)\r\n logging.info('ADB command to enable Wifi has been sent')", "def connect_type(word_list):\n if 'wlan0' in word_list or 'wlan1' in word_list:\n con_type = 'wifi'\n elif 'eth0' in word_list:\n con_type = 'ethernet'\n else:\n con_type = 'current'\n return con_type", "def enable_airplane_mode(driver):\r\n if int(Device.platform_version[0]) < 7:\r\n command = 'adb shell settings put global airplane_mode_on 1 & ' \\\r\n 'adb shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true'\r\n Android.send_adb_command(command, 5)\r\n logging.info('ADB command to enable airplane mode has been sent')\r\n elif int(Device.platform_version[0]) >= 7:\r\n # opening setting panel\r\n command = 'adb shell am start -a android.settings.AIRPLANE_MODE_SETTINGS'\r\n Android.send_adb_command(command, 5)\r\n logging.info('ADB command to open setting panel')\r\n setting_airplane_mode_switch = keywords.get_device_locator('setting_airplane_mode_switch')\r\n if keywords.check_exist(driver, setting_airplane_mode_switch, 5):\r\n airplane_mode_text = keywords.get_element_attribute(driver, setting_airplane_mode_switch, 'text')\r\n if airplane_mode_text == 'OFF':\r\n keywords.click(driver, setting_airplane_mode_switch)\r\n logging.info('enabled airplane mode')\r\n else:\r\n logging.info('airplane mode already enabled')\r\n\r\n # Closing setting panel\r\n command = 'adb shell input keyevent 4'\r\n Android.send_adb_command(command, 5)", "def put_device_to_doze():\r\n Android.bring_device_out_of_doze()\r\n Android.send_adb_command('adb shell dumpsys deviceidle enable>>temp.txt')\r\n open_file = open('temp.txt', 'r')\r\n temp = open_file.read()\r\n open_file.close()\r\n os.remove(\"temp.txt\")\r\n if 'idle mode enabled'.lower() in temp.lower():\r\n # put device to doze mode\r\n Android.send_adb_command('adb shell dumpsys deviceidle force-idle>>temp.txt')\r\n open_file = open('temp.txt', 'r')\r\n temp = open_file.read()\r\n open_file.close()\r\n os.remove(\"temp.txt\")\r\n if 'Now forced in to deep idle mode'.lower() in temp.lower():\r\n logging.info('Device has been forced to doze by using adb command')\r\n return True\r\n else:\r\n logging.error('Device has been enabled to doze but could not be forced dozed by using adb command')\r\n return False\r\n else:\r\n logging.error('Device could not be enabled to doze by using adb command')\r\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that returns true if a string contains a number
def hasNumbers(inputString): return any(char.isdigit() for char in inputString)
[ "def hasnumber(self, s):\n return bool(re.search(r'\\d', s))", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def hasReNumbers(inputString):\n return bool(re.search(r'\\d', inputString))", "def _is_number(string: str) -> bool:\n try:\n int(string)\n return True\n except ValueError:\n return False", "def contains_digits(input_str):\n return any(i in string.digits for i in input_str)\n pass", "def is_a_number(word):\n p = re.compile('^-?[0-9]+$')\n return 1 if p.match(word) else 0", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def contains_digits(input_str: str) -> bool:\r\n return any(c.isdigit() for c in input_str)", "def _is_number(word):\n return bool(re.search(r'^\\d+(.\\d+)?(rd|st|nd|th)?$', word))", "def string_contains_digits(self, string):\n return bool(self.compiledDigitRegex.search(string))", "def _contains_number(text):\n return any((True for n in xrange(10) if str(n) in text))", "def is_number(text):\n try:\n int(text)\n return True\n except ValueError:\n return False", "def is_digits(string: str) -> bool:\n\n output: bool = False\n try:\n num: int = int(string)\n output = True\n except ValueError as e:\n output = False\n return output", "def is_number(string):\n\n try:\n float(string)\n is_num = True\n except ValueError:\n is_num = False\n\n return is_num", "def is_num(self, text) -> bool:\n try:\n int(text)\n return True\n except ValueError:\n return False", "def isDigitString(s):\n\n for char in s:\n if char not in string.digits: return 0\n return 1", "def only_numbers(string):\n for character in string[:-1]:\n if not (character.isdigit() or character in (',', ' ')): \n return False\n return True", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that saves the return_list from make_time to a file called yt_vids.txt. Optional, default False
def save_link_time(return_list, path_to_download): # Opens a new file and writes lines to it and saves it at the spot provided with open(os.path.join(path_to_download, "yt_vids.txt"), "w") as w: w.write('\n'.join('{} {} {}'.format( x[0], x[1][0], x[1][1]) for x in return_list))
[ "def write_list_to_file(my_list):\r\n with open(str(filename.strftime(\"%Y-%m-%d-%H-%M-%S-%f\")) + \".txt\", \"w\") as file:\r\n for line in my_list:\r\n file.write(str(line)+ \"\\n\") #Writing line by line\r", "def export_times(self, filename=None):\r\n if filename is None:\r\n filename = askopenfilename()\r\n\r\n if not filename.endswith(\".txt\"):\r\n tk.messagebox.showerror(\"Invalid extension\", \"File extension must be .txt\")\r\n return\r\n\r\n if not exists(filename):\r\n file = open(filename, \"x\")\r\n file.close()\r\n\r\n to_write = []\r\n for time in self.times:\r\n if time.DNF:\r\n line = f\"{time.time}, {time.scramble}, {time.date}, DNF\"\r\n to_write.append(line)\r\n\r\n else:\r\n line = f\"{time.time}, {time.scramble}, {time.date}\"\r\n to_write.append(line)\r\n\r\n to_write = \"\\n\".join(to_write)\r\n with open(filename, \"w\") as f:\r\n f.write(to_write)", "def save_results_mpc(file_out, coords, times, observatory=\"X05\"):\n if len(times) != len(coords):\n raise ValueError(f\"Unequal lists {len(times)} != {len(coords)}\")\n\n with open(file_out, \"w\") as f:\n for i in range(len(times)):\n mpc_line = FileUtils.format_result_mpc(coords[i], times[i], observatory)\n f.write(mpc_line + \"\\n\")", "def savetrial(self, resp, resptime):\n \n s1 = self.design['S1'][self.currenttrial] #Das Vergleichsbild\n s2 = self.design['S2'][self.currenttrial] #Linkes Bild\n s3 = self.design['S3'][self.currenttrial] #Rechtes Bild\n \n #row = [resp, r.index(s1)+1, r.index(s2)+1, r.index(s3)+1]\n row = [resp, s1+1, s2+1, s3+1]\n # row to save is the indices in the vector R, plus one as MLDS package\n # likes indices to start at 1 and not zero.\n \n self.resultswriter.writerow(row)\n print('Trial %d saved' % self.currenttrial)", "def writeStorageTimeList(storage_csv_list,filename_string):\n storage_tm = [[] for i in range(len(unique_major_time))]# Generate a list of paths for each major time.\n list_of_csv_paths = []\n\n for tm in unique_major_time:\n tm_index = unique_major_time.index(tm)# String recorded to check which files are from the same major time period.\n tm_string = '%s.csv' % (tm)\n\n for path in storage_csv_list:\n if tm_string in path:\n storage_tm[tm_index].append(path)\n\n concatenated = Outputs_path + filename_string+ \"_concatenated_\" + tm_string\n list_of_csv_paths.append(concatenated)\n\n with open(concatenated,\"wb\") as out_csv, open(storage_tm[tm_index][0]) as in_csv:\n reader = csv.reader(in_csv, delimiter=',')\n header = next(reader)\n out_csv.write(str(header).replace(\"'\",\"\").replace('[','').replace(']','') + '\\n')\n for row in reader:\n if int(row[5]) >= 10:\n row[5] = '2000-01-%s' % (row[5].replace(' ','')) + ' 00:00:00.000'\n else:\n row[5] = '2000-01-0%s' % (row[5].replace(' ','')) + ' 00:00:00.000'\n line = str(row).replace(\"'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\")\n out_csv.write(line + '\\n')\n\n return list_of_csv_paths", "def save_task_list(self) -> None:\n with open(\"resources/task_list.json\", \"w\") as outfile:\n json.dump(self.task_list, outfile, ensure_ascii=False, indent=4)\n\n self.task_list_hidden[:] = [\n task for task in self.task_list if task[\"completed\"] == \"No\"\n ]\n with open(\"resources/task_list_hidden.json\", \"w\") as outfile:\n json.dump(self.task_list_hidden, outfile, ensure_ascii=False, indent=4)", "def write_detection_times():\n # Start with ZTF Detection Times\n sp_name = 'ZTF.GetDetectionTimes'\n df = sp2df(sp_name=sp_name)\n # Add the CalendarDateTime\n df['CalendarDateTime'] = [mjd_to_datetime(x) for x in 
df.mjd.values]\n # Array of observation times\n t_obs = df.mjd.values\n # Calculate Earth and Sun vectors at observation times\n q_earth, v_earth = get_earth_vectors(ts=t_obs) \n q_sun, v_sun = get_sun_vectors(ts=t_obs)\n # Calculate the topos adustment\n site_name = 'palomar'\n dq, dv = calc_topos(t_obs=t_obs, site_name=site_name)\n # Position of the observer in space\n q_obs = q_earth + dq\n v_obs = v_earth + dv\n\n # Save to DataFrame\n cols_q_obs = ['qObs_x', 'qObs_y', 'qObs_z',]\n cols_v_obs = ['vObs_x', 'vObs_y', 'vObs_z',]\n cols_q_sun = ['qSun_x', 'qSun_y', 'qSun_z',]\n cols_v_sun = ['vSun_x', 'vSun_y', 'vSun_z',]\n df[cols_q_obs] = q_obs\n df[cols_v_obs] = v_obs\n df[cols_q_sun] = q_sun\n df[cols_v_sun] = v_sun\n\n # Save to KS.DetectionTime\n cols_key = ['DetectionTimeID', 'HiResTimeID', 'mjd', 'CalendarDateTime', 'DataSourceID', 'ObservatoryID',]\n columns = cols_key + cols_q_obs + cols_v_obs + cols_q_sun + cols_v_sun\n df2db(df=df, schema='KS', table='DetectionTime', columns=columns)", "def _save_to_file(self, world, is_smart):\n filepath = self._get_filepath(world._generating_city_name, is_smart, world._generating_scale)\n assert not os.path.exists(filepath), \"File '%s' already exists!\" % filepath\n log.info(\"Saving the new results to {} ...\".format(filepath))\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n with open(filepath, 'wb') as f:\n pickle.dump((world, Params.loader()), f)", "def make_obsfile(obslist, outpath):\n with open(outpath, 'w') as f:\n for obs in obslist:\n f.write(f\"{obs}\\n\")", "def write_list_file(output_file, clip_list_arr):\n list_file = output_file+'_clip_list.txt'\n print \"list_file: \", list_file\n f = open(list_file, 'w')\n for clip in clip_list_arr:\n line = 'file '+clip\n f.write(\"%s\\n\" % line)\n # Add in a divider movie between clips? (it could go here)\n f.close()\n # print 'list_file', list_file\n # print clip_list_arr\n\n return list_file", "def save_txt(simulation,label):\r\n fout = open('Output/{}.txt'.format(label),'w+')\r\n fout.write('# This output.txt file was obtained running a\\n# simulation with these input parameters\\n\\n')\r\n for line in fin.readlines():\r\n fout.write('# ' + line)\r\n fout.write('\\n\\n# -------------------------------------------- #\\n\\n')\r\n fout.write('# [Time] [Atomic inversion function] \\n\\n')\r\n for t in range(0,len(simulation.time)):\r\n fout.write(' ' + \"{:.2f}\".format(np.round(simulation.time[t],2)).rjust(5,'0') + \\\r\n \"\\t\" + \"{:+.10f}\".format(simulation.W_array[t]) + '\\n')", "def write(self, data: List[TPydanticModel], filepath: pathlib.Path) -> None:\n pass", "def save_outcomes():\n all_data = gen_outcomes()\n with open(\"result.json\", \"w\", encoding='utf-8') as jsonfile:\n json.dump(all_data, jsonfile, ensure_ascii=False)", "def create_output_file(output_path, status_list):\n last_subject_id = \"\"\n\n HeaderStr = ['SubjectID', 'Resource', 'Resources Present', 'Data Present']\n with open(output_path, 'w') as output_file:\n output_file.write('\\t'.join(HeaderStr))\n output_file.write('\\n')\n for status in status_list:\n if (status.get_subject_id() != last_subject_id):\n output_file.write('\\n')\n last_subject_id = status.get_subject_id()\n\n output_processing_status(output_file, status)", "def write_idt(cls, oligo_result_list, oligo_output_filename):\n class WellIdGenerator(object):\n \"\"\"Generates 96-plate well ids from A1 ... A12, ..., H1 ... 
H12\n \"\"\"\n\n LETTER_TRANSITION_TABLE = {\n 'A': 'B',\n 'B': 'C',\n 'C': 'D',\n 'D': 'E',\n 'E': 'F',\n 'F': 'G',\n 'G': 'H',\n 'H': 'A',\n }\n\n\n def __init__(self):\n self.letter = 'A'\n self.number = 1\n\n\n def __iter__(self):\n return self\n\n\n def next(self):\n # Create the current return value.\n current_id = self.letter + \"%02d\" % (self.number,)\n\n # Bump the state.\n if self.number == 12:\n self.letter = self.LETTER_TRANSITION_TABLE[self.letter]\n\n if self.number == 12:\n self.number = 1\n else:\n self.number += 1\n\n # Return current.\n return current_id\n\n\n with open(oligo_output_filename, 'w') as csvfile:\n writer = csv.DictWriter(csvfile,\n cls.IDT_OUTPUT_OLIGO_FIELD_NAMES)\n writer.writeheader()\n for oligo_result, well_id in zip(oligo_result_list, WellIdGenerator()):\n writer.writerow({\n 'target_id': oligo_result.target_id,\n 'well': well_id,\n 'sequence': oligo_result.oligo_seq\n })", "def write_rttm(segs_list, out_rttm_file):\n\n rttm = []\n rec_id = segs_list[0][0]\n\n for seg in segs_list:\n new_row = [\n \"SPEAKER\",\n rec_id,\n \"0\",\n str(round(seg[1], 4)),\n str(round(seg[2] - seg[1], 4)),\n \"<NA>\",\n \"<NA>\",\n seg[3],\n \"<NA>\",\n \"<NA>\",\n ]\n rttm.append(new_row)\n\n with open(out_rttm_file, \"w\") as f:\n for row in rttm:\n line_str = \" \".join(row)\n f.write(\"%s\\n\" % line_str)", "def save_list(todo_list, save_location):\r\n data_file_w = open(save_location,\r\n \"w\") # open the save file and clear the data from it\r\n data_file_w.write(\"Warning: The Todo-List Program will not be able to \"\r\n \"load this save file if it is incorrectly modified. \"\r\n \"Modify at your own risk. The structure is Entry \"\r\n \"Text, Entry Priority as a number, Entry Group as a \"\r\n \"number (Not Yet Utilized, but necessary), and Entry \"\r\n \"Visibility as a boolean, each on a separate line, a \"\r\n \"single line gap in between, and the \"\r\n \"very first line is skipped\\n\")\r\n for item in todo_list:\r\n data_file_w.write(\"{0}\\n{1}\\n{2}\\n{3}\\n\\n\".format(item.text,\r\n str(item.priority),\r\n str(item.group),\r\n str(item.visible)))\r\n data_file_w.close()\r\n return", "def save_time_spent(self):\n\n ratings_dir = Path(self.out_dir).resolve() / cfg.suffix_ratings_dir\n if not ratings_dir.exists():\n makedirs(ratings_dir, exist_ok=True)\n\n timer_file = ratings_dir / '{}_{}_{}'.format(\n self.vis_type, self.suffix, cfg.file_name_timer)\n\n lines = '\\n'.join(['{},{}'.format(sid, elapsed_time)\n for sid, elapsed_time in self.timer.items()])\n\n # saving to disk\n try:\n with open(timer_file, 'w') as tf:\n tf.write(lines)\n except:\n print('Unable to save timer info to disk -- printing them to log:')\n print(lines)\n raise IOError('Error in saving timer info to file!')\n\n # printing summary\n times = np.array(list(self.timer.values()))\n if len(times) < 10:\n print('\\n\\ntimes spent per subject in seconds:\\n{}'.format(lines))\n\n print('\\nMedian time per subject : {} seconds'.format(np.median(times)))\n print('\\t5th and 95th percentile of distribution of times spent '\n ': {} seconds'.format(np.nanpercentile(times, [5, 95])))", "def create_checkfile(artist_list):\n with open(\"checkfile4.txt\", 'w') as checkfile: # we are creating new file named checkfile, hence method r for write\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks: # NOTE: we change below from 2.name back to 2.title\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format(new_artist, new_album, new_song),\n 
file=checkfile)\n\n # NOTE: python 2 does not allow print above where you have {0.name} etc\n # To run this pring format in python 2, you need to import print_function at the top of code using:\n # from __future__ import print_function" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that downloads a whole video when no interval is supplied. Downloaded to the same place where yt_vids is saved to (from save_link_time func)
def download_whole(no_interval): print(os.getcwd()) SAVE_PATH = 'tmp' ydl_opts = {"nocheckcertificate": True, "noplaylist": True, 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'} with youtube_dl.YoutubeDL(ydl_opts) as ydl: for video in range(len(no_interval)): try: ydl.download([no_interval[video]]) except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError: print(f"Couldn't download {no_interval[video]}") continue
[ "def download_video(self):\n track = self.f_name + self.file_type\n # youtube_cmd = [\n # \"youtube-dl\", self.link, \"-f\",\n # self.file_type, \"-o\", track\n # ]\n\n youtube_cmd = [\n \"youtube-dl\", self.link, \"-o\", track, \"-f\", \"webm\"\n ]\n cmd = ' '.join(youtube_cmd)\n for std_out in popen(cmd):\n self.set_status_label(std_out)\n self.status_label.update_idletasks()\n try:\n move(track, self.downloads)\n except Exception:\n self.set_status_label(\"ERROR DOWNLOADING\")", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download(videoid, outputpath):\n urlString = 'http://youtube.com/watch?v=' + videoid\n yt = YouTube(urlString)\n yt.streams.filter(progressive=True, file_extension='mp4').order_by(\n 'resolution')[-1].download(output_path=outputpath)\n print(yt.title + ' downlaoded to ' + outputpath)", "def download_video(url,path):\n ydl_opts = {'outtmpl': path, 'format': '22'}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n print('Video downloaded to ' + path)", "def download_video(url, output):\n ydl_opts = {\n # 'format': 'bestvideo[ext=mp4]+bestaudio[ext=wav]/best[ext=mp4]',\n 'format': 'best[ext=mp4]',\n 'outtmpl': output\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])", "def download(videoId, uid = None, savepath = None):\n video_url = 'https://www.youtube.com/watch?v=' + videoId\n res = {}\n if savepath:\n res['savedpath'] = savepath\n else:\n res['savedpath'] = os.getcwd() + os.path.sep + \"downloads/\"\n\n ydl_opts = {}\n # TODO: a. Need to verify the download success/failure and other parameters.\n # b. Need to update the master database of the md5sum, file location of the download, date time, uid, retention period of the file\n # c. Need to update the user schema of the download information.\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n os.chdir(res['savedpath'])\n ydl.download([video_url])\n res['status'] = 'success'\n return res", "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. 
Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = (song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "def download_video(title, url):\n new_path = DL_PATH + title\n if os.path.exists(\"%s.mp4\" % new_path):\n print \"Already downloaded %s.mp4. Skipping...\\n---\\n\" % new_path\n return None\n print \"Creating file: %s.mp4\" % new_path\n with open(\"%s.mp4\" % new_path, \"wb\") as the_file:\n print \"File %s.mp4 created. Downloading and writing...\" % new_path\n if url == None:\n print \"No URL, can't download...\\n---\\n\"\n os.remove(\"%s.mp4\" % new_path)\n return None\n try:\n req = requests.get(url, stream=True)\n except OSError:\n #TODO: this is silly, we can get rid of this by checking +\n #creating the path at the function's beginning.\n print \"Invalid filepath. 
Try a new one.\\n---\\n\"\n return None\n for block in req.iter_content(1024):\n if not block:\n break\n the_file.write(block)\n print \"Success!\\n---\\n\"\n return True", "async def download_youtube_video_into_mp3(self, ctx, *, links : str):\n if not links.startswith(\"http\"):\n await self.bot.say(\"*`Please Start with a HTTP URL!`*\")\n return\n links = links.split(\" \")\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n if len(links) > 1 and (not links[1].startswith(\"-\") or len(links) != 2):\n embed = discord.Embed(title=\"Multiple Youtube Download... [{} Total]\".format(len([link for link in links])), description=\"*All done by Youtube-DL*\")\n embed_msg = await self.bot.say(embed=embed)\n before = datetime.datetime.now()\n x = 0\n fp = \"/media/seacow/Music/{}.zip\".format(\"Songs_for_\"+ctx.message.author.name.replace(\" \", \"_\"))\n zips = ZipFile(fp, \"w\")\n while x < len(links):\n dont_convert = False\n dont_upload = False\n if \"-noconvert\" in links[x]:\n del links[x]\n dont_convert = True\n x = x - 1\n elif \"-noupload\" in links[x]:\n del links[x]\n x = x - 1\n link = links[x]\n afterdl = await self.download_video_song(link, dont_convert = dont_convert, dont_upload = dont_upload, multi=True, embed=embed, embed_msg=embed_msg)\n z = afterdl[1]\n if z != None:\n zips.write(z, arcname=z.split(\"/\")[-1])\n x += 1\n zips.close()\n after = datetime.datetime.now()\n elapsed = after - before\n embed = afterdl[0]\n embed.add_field(name=\"Downloads Complete! Uploading...\", value=\"*Took {0.seconds} seconds*\".format(elapsed), inline=False)\n await self.bot.edit_message(embed_msg, embed=embed)\n await self.bot.upload(fp)\n os.remove(fp)\n embed.set_field_at(-1, name=\"Upload Complete!\", value=embed.fields[-1].value, inline=embed.fields[-1].inline)\n await self.bot.edit_message(embed_msg, embed=embed)\n else:\n links = \" \".join(links)\n dont_convert = False\n dont_upload = False\n if \"-noconvert\" in links:\n dont_convert = True\n elif \"-noupload\" in links:\n dont_upload = True\n link = links.split(\" \")[0]\n await self.download_video_song(link = link, dont_convert = dont_convert, dont_upload = dont_upload)", "def test_download_only(self):\r\n v = video(video_ogv_url='http://example.com/OGV_VIDEO',\r\n video_ogv_download_only=False,\r\n video_mp4_url='http://example.com/MP4_VIDEO',\r\n video_mp4_download_only=True,\r\n save=True)\r\n\r\n resp = self.client.get(v.get_absolute_url())\r\n # This shows up in video tag and in downloads area\r\n eq_(resp.content.count(b'OGV_VIDEO'), 2)\r\n # This only shows up in downloads area\r\n eq_(resp.content.count(b'MP4_VIDEO'), 1)", "def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):\n \n raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id)\n video_info = parse.parse_qs(raw_video_info)\n \n if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']):\n title = parse.unquote_plus(video_info['title'][0])\n stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',')\n \n else:\n # Parse video page when video_info is not usable.\n video_page = get_content('http://www.youtube.com/watch?v=%s' % id)\n ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\\s*=\\s*([^\\n]+);'))\n \n title = ytplayer_config['args']['title']\n stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')\n \n streams = {\n 
parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream)\n for stream in stream_list\n }\n \n for codec in yt_codecs:\n itag = str(codec['itag'])\n if itag in streams:\n download_stream = streams[itag]\n break\n \n url = download_stream['url'][0]\n if 'sig' in download_stream:\n sig = download_stream['sig'][0]\n else:\n sig = decrypt_signature(download_stream['s'][0])\n url = '%s&signature=%s' % (url, sig)\n \n type, ext, size = url_info(url)\n \n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], title, ext, size, output_dir, merge = merge)", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def update_yt_dl():# TODO", "def download_videos_by_their_ids(self):\n self.n_downloaded = 0\n for i, vid in enumerate(self.videoids_to_download):\n n_seq = i 
+ 1\n self.issue_download(n_seq, vid)", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. \" + str(e))", "def download_video(all_videos, videos_path):\n for category in all_videos:\n ctr = 0\n for type in all_videos[category]:\n for v_idx, video in enumerate(all_videos[category][type]):\n print(\"Downloading video {}\".format(video))\n yt = pytube.YouTube(video)\n out_file = \".mp4\".format(video)\n stream = yt.streams.filter(file_extension=\"mp4\").first()\n stream.download(videos_path,\n filename='video_{}_{}'.format(category, ctr))\n\n print(os.path.join(\"videos\", \"{}.mp4\".format(yt.title)))\n ctr += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to download videos in specified intervals. Takes a list (interval_list) and a path as inputs.
def download_interval(interval_list): start = ['start', 'begin', 'beginning', 'head', 'first'] end = ['slut', 'end', 'tail', 'finish', 'finito', 'fin', 'done', 'finished'] # Iterate over the list for link in range(len(interval_list)): try: video = pafy.new(interval_list[link][0], ydl_opts={ 'nocheckcertificate': True, "noplaylist": True}) # Only downloads the video if the video hasn't been downloaded before if not os.path.exists(os.path.join("tmp", f"{video.title}.mp4")): video_s = video.getbestvideo() # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list video_a = video.getbestaudio() # Checks if the end point is a string if interval_list[link][1][1].lower() in end: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=video.duration) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=video.duration) else: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) # Do the processing try: ( ffmpeg .concat( # Specify what you want from the streams (v for video and a for audio) mp4_vid['v'], mp4_aud['a'], # One video stream and one audio stream v=1, a=1 ) # Output is title of video with mp4 ending .output(os.path.join("tmp", f'{video.title}.mp4')) .run() ) except TypeError as e: print(f"An error occurred e 0: {e}") except ffmpeg._run.Error as e: print(f"An error occurred e 1: {e}") except Exception as e: print(f"I couldn't download {interval_list[link]} due to: {e}")
[ "def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n try:\n ydl.download([no_interval[video]])\n except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:\n print(f\"Couldn't download {no_interval[video]}\")\n continue", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n filename = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n filename.append(video[\"file\"])\n if filename:\n for name in filename:\n downloadvideo(name)\n else:\n safeprint(\"No video matching the given query was found.\")", "def download_video(all_videos, videos_path):\n for category in all_videos:\n ctr = 0\n for type in all_videos[category]:\n for v_idx, video in enumerate(all_videos[category][type]):\n print(\"Downloading video {}\".format(video))\n yt = pytube.YouTube(video)\n out_file = \".mp4\".format(video)\n stream = yt.streams.filter(file_extension=\"mp4\").first()\n stream.download(videos_path,\n filename='video_{}_{}'.format(category, ctr))\n\n print(os.path.join(\"videos\", \"{}.mp4\".format(yt.title)))\n ctr += 1", "def download_videos_in_playlist(playlist, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n num_videos = len(playlist)\n\n for i, entry in enumerate(playlist):\n filename = str(entry['index'])\n output_path = os.path.join(output_dir, '%s.mp4' % (filename))\n command = 'youtube-dl -U {} -f mp4 -o {}'.format(\n entry['url'], output_path)\n print('Downloading video (%d / %d) from %s...' 
%\n (i, num_videos, entry['url']))\n os.system(command)", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def downloadStreams(vidURL, audURL, startPoint, duration, filename):\r\n ff = FFmpeg(\r\n inputs={\r\n f\"{vidURL}\" : ['-ss', f\"{startPoint}\", '-t', f\"{duration}\"],\r\n f\"{audURL}\" : ['-ss', f\"{startPoint}\", '-t', f\"{duration}\"]\r\n },\r\n outputs={\r\n f\"{filename}\": ['-map', '0:v:0', '-map', '1:a:0', '-y']\r\n }\r\n )\r\n\r\n ff.run()", "def download_videos_by_their_ids(self):\n self.n_downloaded = 0\n for i, vid in enumerate(self.videoids_to_download):\n n_seq = i + 1\n self.issue_download(n_seq, vid)", "def download_class_videos(vid_class, vid_class_info):\n vid_list = []\n index = 0\n for id, start in vid_class_info:\n if id == '#NAME?':\n continue # SKIP if no valid youtube_id\n vid_url = 'http://youtube.com/watch?v=' + id\n try:\n vid_info = ydl.extract_info(vid_url, download=False)\n url = vid_info['url']\n # vid_name = re.sub(r'\\s+', '_', vid_class) + id + \".avi\"\n vid_name = re.sub(r'\\s+', '_', vid_class) + \"_\" + index + \".avi\"\n ret = subprocess.call([\"ffmpeg\", \"-i\", url, \"-ss\", start, \"-t\", VIDEO_DURATION, \"-c:v\", \"libx264\", vid_name]) # DOWNLOAD VIDEOS\n if ret == 0:\n LogFile.write(\"pass, %s, %s\\n\" % (vid_class, id))\n vid_list.append(vid_name)\n else:\n LogFile.write(\"fail, %s, %s\\n\" % (vid_class, id))\n except Exception as e:\n LogFile.write(\"fail, %s, %s\\n\" % (vid_class, id))\n index = index + 1\n return vid_list", "def get_download_links(clips_twitch_response):\n\n # Use the Chrome driver\n driver = webdriver.Chrome()\n\n download_links = []\n clips_names = []\n total_duration = 0\n for clip in clips_twitch_response:\n driver.get(clip['url'])\n try:\n # Wait until src attribute is generated for the video\n WebDriverWait(driver, 10).until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, \"video[src]\")))\n\n # Find the src url inside a <div class = player-video><video src = DOWNLOAD_URL><\\video><\\div>\n element = driver.find_element_by_css_selector(\"video[src]\")\n url = element.get_attribute('src')\n\n download_links.append(url)\n clips_names.append(clip['broadcaster_name'])\n\n # Find the video duration\n element = driver.find_element_by_css_selector(\"div.player-slider[aria-valuemax]\")\n duration = int(float(element.get_attribute(\"aria-valuemax\"))) # Convert to int to have a lower bound\n total_duration += duration\n if total_duration > VIDEO_DURATION_THRESHOLD:\n break\n\n except Exception:\n print(\"ERROR - Can't get the clip download 
url:{}\".format(clip['url']))\n\n driver.close()\n\n return download_links, clips_names", "def test_download_videos(self):\n videos = glob.glob('test_videos/*.mp4')\n\n self.assertEqual(len(videos), 2)", "def download_videos(data, category):\n # file_ids = get_existing_file_ids()\n\n # Sorry: This is gross.\n directory = os.path.abspath('./' + slugify(category))\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n print 'Saving files to {0}'.format(directory)\n\n start_time = time.time()\n failed_videos = []\n\n for line in data[category]:\n print ''\n print 'Working on {0} - {1}'.format(line[0], line[2])\n\n # if line[0] in file_ids:\n # print ' Skipping -- already got it'\n # continue\n\n fn = '{0}_{1}'.format(line[0], slugify(line[2]))\n try:\n download_video(line[3], os.path.join(directory, fn))\n except NoDownloadMeNoLikeyException:\n failed_videos.append(line)\n\n print ''\n if failed_videos:\n print 'FAILED VIDEOS:'\n for fail in failed_videos:\n print ' ' + '\\t'.join(fail)\n print ''\n\n print 'Total videos: {0}'.format(len(data[category]))\n print 'Total time: {0}'.format(format_duration(time.time() - start_time))\n return 0", "def download_video(self):\n track = self.f_name + self.file_type\n # youtube_cmd = [\n # \"youtube-dl\", self.link, \"-f\",\n # self.file_type, \"-o\", track\n # ]\n\n youtube_cmd = [\n \"youtube-dl\", self.link, \"-o\", track, \"-f\", \"webm\"\n ]\n cmd = ' '.join(youtube_cmd)\n for std_out in popen(cmd):\n self.set_status_label(std_out)\n self.status_label.update_idletasks()\n try:\n move(track, self.downloads)\n except Exception:\n self.set_status_label(\"ERROR DOWNLOADING\")", "def download_for_dates(date_list, url, username, password, file_prefix, file_extension):\n\n for i in date_list:\n new_url = eu.modify_url_date(url, i )\n file_name = file_prefix + str(i) + file_extension\n output_file = get_url(new_url, username, password, file_name)\n print(\"Downloaded: \"+output_file)", "def download_for_dates(date_list, url, username, password, file_prefix, file_extension):\r\n\r\n for i in date_list:\r\n new_url = eu.modify_url_date(url, i)\r\n file_name = file_prefix + str(i) + file_extension\r\n output_file = get_url(new_url, username, password, file_name)\r\n print(\"Downloaded: \" + output_file)", "def download_video(url,path):\n ydl_opts = {'outtmpl': path, 'format': '22'}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n print('Video downloaded to ' + path)", "def getVideos():\n return getFilesFromPath(\"videos/\")", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next 
result\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to download pictures from the input sequence
def download_pics(pics_links): for link in range(len(pics_links)): r = requests.get(pics_links[link][0]) with open(os.path.join("tmp", f"{link}.jpg"), "wb") as dl: dl.write(r.content)
[ "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download():\n\n search_query = input(\"Enter image name: \")\n\n #google image search concatinating with search_query to search web\n url = \"https://www.google.co.in/search?hl=en&tbm=isch&source=hp&biw=1296&bih=630&ei=FhcPXfXhPIbavgTYl6K4Aw&q=\"+\\\n search_query.replace(\" \",\"+\")+\"&oq=\"+search_query.replace(\" \", \"+\") \n try:\n req = requests.get(url) #getting result by http get request method\n try:\n soup = BeautifulSoup(req.text, \"html.parser\")\n images = soup.find_all(\"img\") #searching img tag from html parsed object\n count = 0\n for image in images:\n resp = requests.get(image['src']) #get the src address of listed images\n with open(search_query+str(count)+\".jpeg\", \"wb\") as f: #save byte form as search_query+count+extension(.jpeg) \n f.write(resp.content)\n count += 1\n print(\"Images downloaded Successfully\")\n except Exception as e:\n print(e)\n except Exception as e:\n print(\"Check internet connection or it may be request error\")", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def download_images(keyword, limit = 1):\n #creating list of arguments\n arguments = {\"keywords\": keyword ,\n \"limit\": limit , \n \"print_urls\": False,\n \"output_directory\": OUT_DIR} \n\n # Pass the arguments to above function and download images\n paths = response.download(arguments)", "def read_and_download(url, img_num, filename=\"./mask_images/\"):\n try:\n img_data = urlopen(url).read()\n # print(img_data)\n # b_data = binascii.unhexlify(img_data)\n # img = Image.open(io.BytesIO(b_data))\n img = Image.open(io.BytesIO(img_data))\n path = os.path.join(filename, f\"img_{img_num}.png\")\n img.save(path)\n except Exception as e:\n print(e)", "async def dl_image(url, filename):\n\ttry:\n\t\twith aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url) as resp:\n\t\t\t\ttest = await resp.read()\n\t\t\t\twith open('data/tmp/'+filename.lower(), \"wb\") as f:\n\t\t\t\t\tf.write(test)\n\t\t\t\treturn 0\n\texcept Exception as e:\n\t\tprint('[!ERROR!] 
in Get image')\n\t\tprint(e)\n\t\treturn -1", "def get_multi_img(self, img_url, referer_url, pic_num, pic_name, suffix, artist_name):\r\n global jpg_success_num, png_success_num, break_num_rec\r\n for num in range(pic_num): #默认jpg下载\r\n img_url = str(img_url)\r\n if not num == 0:\r\n if \".jpg\" in img_url:\r\n img_url = img_url.replace(\"_p{0}.jpg\".format(num - 1), \"_p{0}.jpg\".format(num))\r\n elif \".png\" in img_url:\r\n img_url = img_url.replace(\"_p{0}.png\".format(num - 1), \"_p{0}.jpg\".format(num))\r\n suffix = \"jpg\"\r\n print(\"\\t尝试jpg文件下载...\")\r\n print(\"Requests:{0}\".format(img_url))\r\n img_url_source = img_url #暂时保存jpg url供master1200下载使用\r\n # print(\"for循环编号测试:{0}_p{1}.{2}\".format(pic_name, num, suffix))\r\n pic_tem_list = []\r\n pic_tem_list.append(\"{0}_p{1}.{2}\".format(pic_name, num, 'jpg'))\r\n pic_tem_list.append(\"{0}_p{1}.{2}\".format(pic_name, num, \"png\"))\r\n pic_tem_list.append(\"{0}_p{1}.{2}\".format(pic_name + \"_master1200\", num, \"jpg\"))\r\n pic_tem_list.append(\"{0}_p{1}.{2}\".format(pic_name + \"_master1200\", num, \"png\"))\r\n if (pic_tem_list[0] in self.picture_list) | (pic_tem_list[1] in self.picture_list) | (pic_tem_list[2] in self.picture_list) | (pic_tem_list[3] in self.picture_list):\r\n print(\"{0}_p{1}.{2} or png 文件存在在picture_list已有下载记录,跳过下载...\\n\".format(pic_name, num, suffix))\r\n break_num_rec = break_num_rec + 1\r\n continue\r\n elif self.pic_exist(pic_name, num, suffix, artist_name=artist_name):\r\n with open(\"{0}\\\\collection_list.txt\".format(self.download_path), 'a', encoding='utf8') as pic_list_write:\r\n pic_list_write.write(\"{0}_p{1}.{2}\\n\".format(pic_name, num, suffix))\r\n break_num_rec = break_num_rec + 1\r\n print(\"{0}_p{1}.{2}写入成功...\\n\".format(pic_name, num, suffix))\r\n continue\r\n else:\r\n break_num_rec = 0\r\n try:\r\n response = se.get(img_url, headers=self.headers, stream=True, verify=False, timeout=(time_out,time_out_add)) #加verify防止SSL报错2:::有效,谨慎删除\r\n print(\"\\tresponse 状态:\", response.status_code)\r\n # if not response.status_code == 200:\r\n # raise IndexError\r\n except:\r\n print(\"{0}_p{1}--->服务器请求失败,将重试{2}次请求...\".format(pic_name,num, retry_num))\r\n for i in range(retry_num):\r\n time.sleep(0.1)\r\n try:\r\n response = se.get(img_url, headers=self.headers, stream=True, verify=False, timeout=(time_out,time_out_add))\r\n if response.status_code == 200:\r\n break\r\n except:\r\n pass\r\n # with closing(se.get(img_url, headers=self.headers, stream=True, verify=False)) as response:\r\n try:\r\n statu_code_tem = int(response.status_code)\r\n except:\r\n statu_code_tem = 555 #手动设置异常\r\n if statu_code_tem == 200: # 网页正常打开\r\n jpg_success_num += 1\r\n # img = response.content\r\n self.download_only(response, img_url=img_url, referer_url=referer_url, pic_num=pic_num, pic_name=pic_name, suffix=suffix, num=num, artist_name=artist_name) #启动下载\r\n else:\r\n print(\"\\tjpg格式下载错误,尝试png...\")\r\n img_url = str(img_url).strip(\".jpg\") + \".png\"\r\n suffix = \"png\"\r\n # print(\"for循环编号测试:{0}_p{1}.{2}\".format(pic_name, num, suffix))\r\n if \"{0}_p{1}.{2}\".format(pic_name, num, suffix) in self.picture_list:\r\n print(\"{0}_p{1}.{2} 文件存在在picture_list已有下载记录,跳过下载...\".format(pic_name, num, suffix))\r\n continue\r\n elif self.pic_exist(pic_name, num, suffix,artist_name=artist_name):\r\n with open(\"{0}\\\\collection_list.txt\".format(self.download_path), 'a', encoding='utf8') as pic_list_write:\r\n pic_list_write.write(\"{0}_p{1}.{2}\\n\".format(pic_name, num, suffix))\r\n 
print(\"{0}_p{1}.{2}写入成功...\\n\".format(pic_name, num, suffix))\r\n continue\r\n else:\r\n try:\r\n response = se.get(img_url, headers=self.headers, stream=True, verify=False, timeout=(time_out,time_out_add)) #加verify防止SSL报错2:::有效,谨慎删除\r\n print(\"\\tresponse 状态:\", response.status_code)\r\n # if not response.status_code == 200:\r\n # raise IndexError\r\n except:\r\n print(\"{0}_p{1}--->服务器请求失败,将重试{2}次请求...\".format(pic_name,num, retry_num))\r\n for i in range(retry_num):\r\n time.sleep(0.3)\r\n try:\r\n response = se.get(img_url, headers=self.headers, stream=True, verify=False, timeout=(time_out,time_out_add))\r\n if response.status_code == 200:\r\n break\r\n except:\r\n pass\r\n try:\r\n statu_code_tem = int(response.status_code)\r\n except:\r\n statu_code_tem = 555 # 手动设置异常\r\n if statu_code_tem == 200: # 网页正常打开\r\n png_success_num += 1\r\n # img = response.content\r\n self.download_only(response, img_url=img_url, referer_url=referer_url, pic_num=pic_num, pic_name=pic_name, suffix=\"png\", num=num, artist_name=artist_name) # 启动下载\r\n else: # self, img_url, pic_name, num\r\n self.download_retry(img_url=img_url_source, pic_name=pic_name, num=num, referer_url=referer_url, pic_num=pic_num, artist_name=artist_name) # 打开失败,尝试其它情况\r", "async def _download_img(h_list: list, path: Path):\n session = aiohttp.ClientSession()\n for h in h_list[:9]:\n index = h_list.index(h)\n img_url = urljoin(dctrad_base, h['src'])\n resp = await session.get(url=img_url)\n buffer = io.BytesIO(await resp.read()) # buffer is a file-like object\n file_ = path / f\"img{index}.jpg\"\n with open(file_, 'wb') as out_file:\n shutil.copyfileobj(buffer, out_file)\n await session.close()", "def download_all_images(self, dir_, ids_file, base_url, need_reload_file):", "def download(url, name):\n\n try:\n response = requests.get(url)\n except requests.exceptions.ConnectionError:\n print(\"Connection is missing pleas check connection\")\n\n if response.status_code == 200:\n with open(\"web_images/\" + \"image\" + name + \".png\", \"wb\") as file:\n file.write(response.content)", "def get_urlpic(url, id, out_path, pic_format):\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n path = os.path.join(out_path, f\"{id}\"+pic_format)\n urllib.request.urlretrieve(url, path)", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def get_image(self, index):\r\n \r\n # Get request to get all the links for all exercises\r\n image = requests.get(API.url_image, headers = self.headers).json()\r\n filename = download(image[index]['image'])", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded 
successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_card_images(self, card_names, lang=\"en\"):\n for card_name in card_names:\n print(\"Dowloading card imgs for \\'\" + card_name + \"\\' (\" + lang + \")\")\n output_file_name = card_name + \".jpg\"\n output_file_path = IoManager.CARD_IMAGES_PATH_EN + \"/\" + output_file_name if lang == \"en\" else IoManager.CARD_IMAGES_PATH_FR + \"/\" + output_file_name\n output_file_path = output_file_path.replace('//', '__')\n en_url, fr_url = self.get_card_urls(card_name)\n url = en_url if lang == \"en\" else fr_url\n # Open the url image, set stream to True, this will return the stream content.\n resp = requests.get(url, stream=True)\n # Open a local file with wb ( write binary ) permission.\n local_file = open(output_file_path, 'wb')\n # Set decode_content value to True, otherwise the downloaded image file's size will be zero.\n resp.raw.decode_content = True\n # Copy the response stream raw data to local image file.\n shutil.copyfileobj(resp.raw, local_file)\n # Remove the image url response object.\n del resp", "def download_photos(self):\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n all_images = soup.find_all('img')\n print('number of photos: ', len(all_images))\n\n for index, image in enumerate(all_images):\n file_name = 'image_' + str(index) + '.jpg'\n image_path = os.path.join(self.path, file_name) # create image path\n link = image['src']\n print('Downloading image #' + str(index))\n try:\n response = requests.get(link, stream=True) # get the image\n with open(image_path, 'wb') as file:\n shutil.copyfileobj(response.raw, file) # save image\n except requests.exceptions.MissingSchema:\n print('Couldn\\'t download image #' + str(index))\n print('image link: ' + str(link))", "def look_for_images(pages_list, saves_list, env_data):\n\n for i in pages_list:\n\n res = requests.get(i)\n res.raise_for_status()\n\n soup_image = bs4.BeautifulSoup(res.text, \"html.parser\").select('img')\n image_count = len(soup_image)\n image_url = get_random_image(soup_image, image_count, env_data)\n\n if image_url is False:\n continue\n\n print(\"\\n Beginning download...\")\n\n if str(image_url).endswith('.jpg 2x'):\n image_url = str(image_url.replace(' ', '')[:-2])\n if not str(image_url).startswith(\"http\"):\n image_url = \"https://\" + str(image_url)\n\n response = requests.get(image_url, stream=True)\n response.raise_for_status()\n saves_list.append(str(image_url))\n save_image(image_url, response, env_data)\n\n print('\\nImage downloaded!')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get chain attribute for an object.
def chain_getattr(obj, attr, value=None): try: return _resolve_value(safe_chain_getattr(obj, attr)) except AttributeError: return value
[ "def get_deep_attr(obj, value):\n subelts = value.split('.', 1)\n if len(subelts) == 1:\n return getattr(obj, value)\n else:\n return get_deep_attr(getattr(obj, subelts[0]), subelts[1])", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def get_attr(obj, name):\n if hasattr(obj, name):\n return getattr(obj, name)\n\n return None", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def get_nested_attribute(obj, attrib_str: str):\n attrs = attrib_str.split(\".\")\n val = obj\n for attr in attrs:\n val = val.__getattribute__(attr)\n\n return val", "def load_attr(self, obj, attr):\n return getattr(obj, attr)", "def getChainFor(self, ob):\n cbt = self._chains_by_type\n if isinstance(ob, str):\n pt = ob\n elif hasattr(aq_base(ob), 'getPortalTypeName'):\n pt = ob.getPortalTypeName()\n else:\n pt = None\n\n if pt is None:\n return ()\n\n chain = None\n if cbt is not None:\n chain = cbt.get(pt, None)\n # Note that if chain is not in cbt or has a value of\n # None, we use a default chain.\n if chain is None:\n return self.getDefaultChain()\n return chain", "def get_chain(self):\n return self.chain", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))", "def _obj_getattr(obj, fqdn, start=1):\n node = obj\n for chain in fqdn.split('.')[start:]:\n if hasattr(node, chain):\n node = getattr(node, chain)\n else:\n node = None\n break\n return node", "def _getter(obj, attr):\n _get = attrgetter(attr)\n try:\n return _get(obj)\n except:\n return None", "def csv_getvalue(obj, path):\n path = path.split('__', 1)\n attr_name = path[0]\n\n if obj is None:\n # Record object is empty, return None\n return None\n if len(path) == 1:\n # Return the last leaf of the path after evaluation\n attr = csv_getattr(obj, attr_name)\n\n if isinstance(attr, models.Model):\n # Attribute is a model instance. Return unicode.\n return unicode(attr)\n elif hasattr(attr, '__call__'):\n # Attribute is a callable method. 
Return its value when called.\n return attr()\n else:\n # Otherwise, assume attr is a simple value\n return attr\n elif len(path) == 2:\n # More of path is remaining to be traversed\n attr = csv_getattr(obj, attr_name)\n\n if attr is None:\n return None\n elif isinstance(attr, models.Model):\n # If attribute is a model instance, traverse into it\n return csv_getvalue(attr, path[1])\n else:\n raise AttributeError('CsvForm: Attribute \\'{0}\\' on object \\'{1}\\' is not a related model'.format(\n attr_name,\n obj._meta.object_name,\n ))", "def test_object_attr(self):\n print(object.__dict__['__getattribute__'])", "def __getattribute__(self, name):\n return object.__getattribute__(object.__getattribute__(self, 'orig'),\n name)", "def getattribute(instance, name) :\n \n # avoid recursion to get proxy methods\n if name == '_Proxy__methods' :\n return object.__getattribute__(instance, name)\n\n # attribute directly defined in the instance ?\n if name not in instance._Proxy__methods :\n try :\n # print('direct :\\t', name)\n return object.__getattribute__(instance, name)\n except AttributeError :\n pass\n\n # attribute defined in the proxy object ?\n # print('proxied :\\t', name)\n return object.__getattribute__(instance._proxy, name)", "def add_attr(chain, attrs):\n chain.TotBandEnergy = attrs.get(\"TotBandEnergy\")\n if attrs.get(\"climbSet\", False):\n chain.climbSet = True\n chain.climbers = attrs.get(\"climbers\")\n chain.locks = attrs.get(\"locks\")\n return chain", "def get_attr(self, termid, attr):\n return self.G.node[termid][attr]", "def __getattribute__(self, value):\n # allow _proxy and _original_ref to access members of this class directly\n if value in (\"_proxy\", \"_original_ref\", \"__eq__\"):\n return object.__getattribute__(self, value)\n\n # OpenAPI 3.1.0 allows Reference objects to make the summary and description\n # fields of the object they're referencing, but only if they have the field\n # defined _and_ the field exists in the type they're referencing.\n if value in (\"summary\", \"description\"):\n if hasattr(self._proxy, value) and getattr(self._original_ref, value) is not None:\n return getattr(self._original_ref, value)\n\n # otherwise, return the value of the proxied object\n return getattr(self._proxy, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trim the list to make the total length no more than the limit. If split is specified, a string is returned.
def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''): if split is None: sl = 0 join = False else: sl = len(split) join = True result = [] rl = 0 for element in iterable: element = prefix + element + postfix el = len(element) if len(result) > 0: el += sl rl += el if rl <= limit: result.append(element) else: break if join: result = split.join(result) return result
[ "def Trim(lst: List[T], limit: int) -> List[T]:\n limit = max(0, limit)\n\n clipping = lst[limit:]\n del lst[limit:]\n return clipping", "def allow_max_length(self, input_list, max_len=150):\n if (len(input_list) > max_len):\n return input_list[:max_len]\n return input_list", "def soar_trimlist(org_list):\n if not isinstance(org_list, list):\n return org_list\n return [element.strip() for element in org_list]", "def trim1(l, proportiontocut, tail='right'):\n if tail == 'right':\n lowercut = 0\n uppercut = len(l) - int(proportiontocut * len(l))\n elif tail == 'left':\n lowercut = int(proportiontocut * len(l))\n uppercut = len(l)\n return l[lowercut:uppercut]", "def reduce_list_size(li):\n size = sys.getsizeof(li)\n keep = li\n toss = []\n n = len(li)\n decrement_by = max(n / 10, 10)\n while (size >= MAX_SIZE) and (n > 0):\n n -= decrement_by\n toss = li[:-n]\n keep = li[-n:]\n size = sys.getsizeof(keep)\n return keep, toss", "def ltrim1 (l,proportiontocut,tail='right'):\r\n if tail == 'right':\r\n lowercut = 0\r\n uppercut = len(l) - int(proportiontocut*len(l))\r\n elif tail == 'left':\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l)\r\n return l[lowercut:uppercut]", "def trimlist(self, iplist, maxIPs = 3):\n \n if len(iplist) > 0:\n iplist = iplist[:maxIPs:]\n return iplist\n else:\n print(\"Error: No IP A-records found.\")\n return None", "def rsplit(self, sep=None, maxsplit=-1): # real signature unknown; restored from __doc__\n return []", "def ltrimboth (l,proportiontocut):\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l) - lowercut\r\n return l[lowercut:uppercut]", "def repair_size_list(self, str_val):\n return [word for word in str_val[2:-2].split('\\', \\'')]", "def shorten_list(l, max_length=15):\n length = len(l)\n if length > max_length:\n sl = l[0:max_length]\n sl.append(\"...%i total\" % length)\n else:\n sl = l\n return sl", "def split(self, sep=None, maxsplit=-1): # real signature unknown; restored from __doc__\n return []", "def trimboth(l, proportiontocut):\n lowercut = int(proportiontocut * len(l))\n uppercut = len(l) - lowercut\n return l[lowercut:uppercut]", "def test_trim(self):\n s1 = 'esrdctfvubfiqisqwduonq'\n assert lws.trim(s1, 5) == 'esrdc...'\n assert lws.trim(s1, 20) == 'esrdctfvubfiqisqwduo...'\n s2 = 'asdasdasd'\n assert lws.trim(s2) == 'asdasdasd'", "def reduce_list_of_str_floats_to_16(origList):\n \n #newList = [item[:16] for item in origList]\n \n charLimit = 16\n \n newList = []\n \n for item in origList:\n if len(item) > charLimit:\n newList.append(item[:charLimit])\n else:\n newList.append(item)\n \n return newList", "def _rsplit(value, sep, maxsplit=None):\n\tstr_parts = value.split(sep)\n\tif (maxsplit is not None) and (len(str_parts) > 1):\n\t\treturn [str.join(sep, str_parts[:-maxsplit])] + str_parts[-maxsplit:]\n\treturn str_parts", "def splitblocks(lst, limit):\r\n res = []\r\n start = 0\r\n while start < len(lst):\r\n res.append(lst[start:start + limit])\r\n start += limit\r\n return res", "def split_and_strip_string(\n given_string: str, split_char: str, max_split: SupportsIndex = -1\n) -> List[str]:\n # Removes whitespace from ends of result strings before adding to list. 
Allow for\n # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.\n return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]", "def trim_to_upper_length_limit(self) -> None:\n self.trim_utils.lang_model = self.config['language_model']\n\n dataframe_splits = np.array_split(self.data, self.n_cores)\n pool = Pool(self.n_cores)\n self.data = pd.concat(pool.map(self.trim_text_for_dataframe, dataframe_splits))\n pool.close()\n pool.join()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It raises an error when trying to decrypt a non-encrypted value.
def test_decrypt_format(self): with pytest.raises(EncryptionError): decrypt('message')
[ "def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'", "def decrypt(ciphertext):\n return ciphertext", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decrypt(self, data):", "def test_encrypt_key_invalid(unencrypted_text, item_uuid, encrypt_key, auth_key):\n with pytest.raises(ValueError):\n Cryptographer.encrypt(unencrypted_text, str(item_uuid), encrypt_key, auth_key)", "def decrypt_message(encrypted_message):", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def decrypt_fable():", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decryptor(byte_string: bytes, IV: bytes, key: bytes) -> bool:\n decrypted_string = AES_CBC_decrypt(byte_string, IV, key)\n print(len(decrypted_string), decrypted_string)\n if not check_ascii_compliance(decrypted_string):\n raise Exception(decrypted_string)", "async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent) -> None:\n logger.error(\n f\"Failed to decrypt event '{event.event_id}' in room '{room.room_id}'!\"\n f\"\\n\\n\"\n f\"Tip: try using a different device ID in your config file and restart.\"\n f\"\\n\\n\"\n f\"If all else fails, delete your store directory and let the bot recreate \"\n f\"it (your reminders will NOT be deleted, but the bot may respond to existing \"\n f\"commands a second time).\"\n )\n\n user_msg = (\n \"Unable to decrypt this message. 
\"\n \"Check whether you've chosen to only encrypt to trusted devices.\"\n )\n\n await send_text_to_room(\n self.client,\n room.room_id,\n user_msg,\n reply_to_event_id=event.event_id,\n )", "def abort_failed_decryption():\r\n abort(400, message='Invalid GPG format or invalid passphrase')", "def assert_ciphertext(encrypted_number):\n if not isinstance(encrypted_number, phe.EncryptedNumber):\n raise ValueError(\"encrypted input is not a EncryptedNumber Type\")", "def test_decrypt(unencrypted_text, item_uuid, encrypt_key, auth_key):\n decrypted_string = Cryptographer.decrypt(\n Cryptographer.encrypt(unencrypted_text, str(item_uuid), encrypt_key, auth_key),\n encrypt_key,\n auth_key,\n )\n assert isinstance(decrypted_string, str)\n assert decrypted_string == unencrypted_text", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def test_decrypt_and_verify_my_secret(self):\n\n # happy path\n with open(self.my_cypherpath, 'r') as cypherfile:\n plaintext = decrypt_and_verify_file(cypherfile)\n\n # confirm assumptions\n self.assertEqual(plaintext, self.my_secret)\n\n # no signiture\n with open(self.unverified_cypherpath, 'r') as unverified_cypherfile:\n with self.assertRaises(NotTrustedError) as e:\n plaintext = decrypt_and_verify_file(unverified_cypherfile)\n self.assertEqual(str(e.exception), \"Invalid signiture\")\n\n # untrusted signiture\n with open(self.untrusted_cypherpath, 'r') as untrusted_cypherfile:\n with self.assertRaises(NotTrustedError) as e:\n plaintext = decrypt_and_verify_file(untrusted_cypherfile)\n self.assertEqual(str(e.exception),\n \"Untrusted Key (OAO Tech) not fully trusted\")\n\n # no encryption\n with open(self.plainpath, 'r') as plainfile:\n with self.assertRaises(DecryptionError):\n plaintext = decrypt_and_verify_file(plainfile)", "def test_val_unretrievable_bad_app_secret(self):\n with datastore.DatabaseConnection(filename=self.temp_file.name,\n file_path_abs=True) as db_con:\n\n key = 'myKey'\n val = 'myVal'\n bad_app_id = datastore.generate_app_id()\n db_con.store_key_val(app_id=self.app_id, app_secret=self.app_secret,\n key=key, val=val)\n\n with self.assertRaises(datastore.DecryptionFailError):\n db_con.get_key_val(\n app_id=bad_app_id, app_secret=self.app_secret, key=key)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It accepts a custom decryption key.
def test_decrypt_key(self): key = b'0' * 32 encrypted = encrypt('message', key=key) assert decrypt(encrypted, key=key) == 'message'
[ "def decrypt_key (key, tenant_id):\n try:\n key = RSA.importKey(key,tenant_id)\n unencrypted_key = key.exportKey('PEM')\n if isinstance(unencrypted_key, ValueError):\n raise NfvoException(\"Unable to decrypt the private key: {}\".format(unencrypted_key), httperrors.Internal_Server_Error)\n if isinstance(unencrypted_key, bytes):\n unencrypted_key = unencrypted_key.decode(encoding='UTF-8')\n except ValueError as e:\n raise NfvoException(\"Unable to decrypt the private key: {}\".format(e), httperrors.Internal_Server_Error)\n return unencrypted_key", "def decrypt_vigenere(ciphertext, keyword):\n pass # Your implementation here", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def decode_key(self, key: Any, msg: SecureMessage) -> Optional[bytes]:\n raise NotImplemented", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def decrypt(private_key, msg):\n return private_key.decrypt(msg)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def decrypt(cypher, key):\n return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)", "def setup_key_decrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(\"Please enter the key that was used to encrypt your message.--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key.\")\t\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def from_key(cls, key: PrivateKey) -> PrivateKey:", "def decrypt(self, ct_model_update, key='ct_weights', **kwargs):\n raise NotImplementedError", "def _decrypt(self, key, value):\n payload = EncryptedPayload.from_json(value)\n if not payload:\n return value\n\n decrypted = self._kms_crypto.decrypt_payload(payload)\n if not decrypted:\n return value\n\n key_prefix = '%s=' % key\n if not decrypted.startswith(key_prefix):\n return value\n\n return decrypted[len(key_prefix):]", "def __decode_key(self, key_type, key):\n return key_type(key, encoder=KEY_ENCODING)", "def decrypt_key(self, key, private_key):\n # type: (bytes, RSA.RSA) -> bytes\n iv = key[0:self.ivLength]\n secret = key[self.ivLength:]\n return iv + private_key.private_decrypt(secret, RSA.pkcs1_oaep_padding)", "def decrypt(self, key, device, private_key):\n device_key = base64.b64decode(self.keys[device.id.hex])\n\n master_key = private_key_decrypt(private_key, device_key)\n\n if master_key is None:\n return\n\n return fernet_decrypt(self.values[key], master_key, self.salt)", "def decrypt(private_key, ciphertext):\n return private_key.decrypt(\n ciphertext,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It reencrypts an encrypted message using a new key.
def test_rekey(self): old_key = b'0' * 32 new_key = b'1' * 32 old_encrypted = encrypt('message', key=old_key) new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key) assert decrypt(new_encrypted, key=new_key) == 'message'
[ "def rekey(self,newkeyid, oldkeyid):\n self.blob = _encrypt(self._decrypt(self.blob, self.key), keyid)\n self.keyid = keyid", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def reencrypt(self,\n rekey: Tuple[tuple, Tuple[bytes, bytes]],\n ciphertext: Tuple[bytes, bytes]\n ) -> Tuple[Tuple[bytes, bytes], Tuple[bytes, bytes]]:\n rk, encrypted_eph = rekey\n rk = umbral.RekeyFrag(rk[0], ec.deserialize(self.pre.ecgroup, rk[1]), pre=PRE)\n ekey, edata = ciphertext\n ekey = umbral.EncryptedKey(\n ekey=ec.deserialize(self.pre.ecgroup, ekey[0]), re_id=ekey[1])\n\n ekey = self.pre.reencrypt(rk, ekey)\n\n ekey = (ec.serialize(ekey.ekey), ekey.re_id)\n return (ekey, edata), encrypted_eph", "def rekey(self,\n pubkey: bytes) -> Tuple[tuple, Tuple[bytes, bytes]]:\n priv_eph = self.pre.gen_priv()\n rk = self.pre.rekey(self._priv_key, priv_eph)\n encrypted_eph = self.encrypt(ec.serialize(priv_eph), pubkey=pubkey)\n return ((rk.id, ec.serialize(rk.key)), encrypted_eph)", "def encrypt(message):\n encoded = encode(message)\n key = generate_key(encoded)\n encrypted = xor_function(encoded, key, \"encrypt\")\n print(\"Encrypted message: \" + str(encrypted))\n print(\"Key: \" + str(key))", "def encrypt(message, key):\n cipher = Salsa20.new(key)\n return cipher.nonce + cipher.encrypt(message.encode())", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def update(self, plaintext):\n return self._encryptor.update(plaintext)", "def revert_key(self, key):\n return str(key).replace(self.replacement, self.replace)", "def rsa_encrypt(public_key, message):\n mime = message.split(\",\")[0]\n mess_data = message.split(\",\")[1]\n n = public_key[0]\n e = public_key[1]\n public_key = (n, e)\n cipher, time_taken_encrypt = RSA.encrypt(\n public_key, mess_data)\n return (mime + \",\" + cipher, time_taken_encrypt)", "def elgamal_encrypt(public_key, message):\n mime = message.split(\",\")[0]\n mess_data = message.split(\",\")[1]\n p = public_key[0]\n g = public_key[1]\n y = public_key[2]\n public_key = (p, g, y)\n cipher, time_taken_encrypt = ELGamal.encrypt(\n public_key, mess_data)\n cipher_string1, cipher_string2 = cipher\n return (mime + \",\" + cipher_string1, mime + \",\" + cipher_string2, time_taken_encrypt)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def encrypt_ecb_block(message, key):\n if len(message) % len(key) != 0:\n message = set_two.implement_pkcs_padding(message, 16)\n encryptor = Cipher(\n AES(key), ECB(), backend=default_backend()\n ).encryptor()\n return encryptor.update(message) + encryptor.finalize()", "def decrypt_message(encrypted_message):", "def caesarShift(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n newMessage = \"\"\n\n # Change shift direction depending on encrypting or decrypting\n if not encrypt:\n key = -key\n\n # Loop through the message\n for char in message:\n index = alphabet.find(char)\n newMessage += alphabet[(index + key) % 26]\n\n return 
newMessage", "def encrypt(message, key):\n # The IV should always be random\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(key, AES.MODE_CFB, iv)\n ciphertext = cipher.encrypt(message)\n return (ciphertext, iv)", "def encrypt(message, key):\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n new_st = ''\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key < 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key]\r\n\r\n if alpha_upper.count(x) != 0 and alpha_upper.index(x) + key < 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key]\r\n\r\n if alpha_upper.count(x)!= 0 and alpha_upper.index(x) + key >= 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key - 26]\r\n\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key >= 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key - 26]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n # --- CODE ENDS HERE\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It raises an error when trying to rekey a nonencrypted value.
def test_rekey_non_encrypted(self): with pytest.raises(EncryptionError): rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)
[ "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def test_revoke_key(self):\n self.fail(\"test not implemented\")", "def test_encrypt_key_invalid(unencrypted_text, item_uuid, encrypt_key, auth_key):\n with pytest.raises(ValueError):\n Cryptographer.encrypt(unencrypted_text, str(item_uuid), encrypt_key, auth_key)", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def testFailingEncryption(self):\n g=gpg.GPG(gpg.findGPG())\n try:\n encrypted, warnings=g.encryptString(['ThisKeyDoesNotExist'],\n 'hello')\n fail(\"Expected encryption to fail, returned\\n\" + encrypted\n + \" with warnings:\\n\" + warnings)\n except gpg.EncryptionError, e:\n self.failUnless(str(e).find(\"public key not found\") > 0,\n \"Got the wrong error:\\n\" + str(e))", "def testDbKeyRotation(self):\r\n @contextmanager\r\n def _OverrideSecret(secret, secret_value):\r\n try:\r\n old_secret_value = secrets.GetSharedSecretsManager()._secrets[secret]\r\n secrets.GetSharedSecretsManager()._secrets[secret] = secret_value\r\n # Clear the cached crypter.\r\n if hasattr(_CryptValue, '_crypter'):\r\n del _CryptValue._crypter\r\n yield\r\n finally:\r\n secrets.GetSharedSecretsManager()._secrets[secret] = old_secret_value\r\n if hasattr(_CryptValue, '_crypter'):\r\n del _CryptValue._crypter\r\n\r\n # Encrypt a value using the original key.\r\n plaintext = 'quick brown fox'\r\n self._crypt_inst.Set(plaintext)\r\n\r\n # Add a new key to the keyset and make it primary and ensure that plaintext can still be recovered.\r\n writer = keyczar_dict.DictWriter(secrets.GetSharedSecretsManager()._secrets['db_crypt'])\r\n czar = keyczar.GenericKeyczar(keyczar_dict.DictReader(writer.dict))\r\n czar.AddVersion(keyinfo.PRIMARY)\r\n czar.Write(writer)\r\n\r\n with _OverrideSecret('db_crypt', json.dumps(writer.dict)):\r\n self.assertEqual(self._crypt_inst.Get().Decrypt(), plaintext)\r\n\r\n # Now remove old key and verify that plaintext cannot be recovered.\r\n czar.Demote(1)\r\n czar.Revoke(1)\r\n czar.Write(writer)\r\n with _OverrideSecret('db_crypt', json.dumps(writer.dict)):\r\n self.assertRaises(errors.KeyNotFoundError, self._crypt_inst.Get().Decrypt)", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert 
FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def reraise_keyerror(err: BaseException, obj_id: str) -> NoReturn:\n if isinstance(err, IndexError):\n if isinstance(err.__cause__, NoKeyError):\n # Property.__getitem__ raises IndexError from\n # NoKeyError, so read from the original\n key_error = err.__cause__\n else:\n # We shouldn't have caught this\n raise err\n else:\n key_error = err\n raise Exception(\n 'No \"{key}\" in {id!s} object!'.format(\n key=key_error.key,\n id=obj_id,\n )\n ) from err", "def test_val_unretrievable_bad_app_secret(self):\n with datastore.DatabaseConnection(filename=self.temp_file.name,\n file_path_abs=True) as db_con:\n\n key = 'myKey'\n val = 'myVal'\n bad_app_id = datastore.generate_app_id()\n db_con.store_key_val(app_id=self.app_id, app_secret=self.app_secret,\n key=key, val=val)\n\n with self.assertRaises(datastore.DecryptionFailError):\n db_con.get_key_val(\n app_id=bad_app_id, app_secret=self.app_secret, key=key)", "def abort_failed_decryption():\r\n abort(400, message='Invalid GPG format or invalid passphrase')", "def test_renew_listen_key_without_key():\n\n client = Client(key)\n client.renew_listen_key.when.called_with(\"\").should.throw(ParameterRequiredError)", "def test_invalid_key(self):\n self.con.simulate_recv('JOIN #wrongkey somekey')\n reply = ':example.com 475 nick #wrongkey :Cannot join channel (+k)\\r\\n'\n self.assert_true(reply in self.con.sent_msgs,\n 'Not informed of bad key')", "def test_one_note_in_key(self) -> None:\n with self.assertRaises(ValueError):\n self.hash.notes(key=0x1)", "def test_invalid_keys(self):\n # mimic custom ``make_key`` method being defined since the default will\n # never show the below warnings\n def func(key, *args):\n return key\n\n old_func = self.cache._backend.key_func\n self.cache._backend.key_func = func\n\n try:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # memcached does not allow whitespace or control characters in\n # keys\n self.cache.set('key with spaces', 'value')\n self.assertEqual(len(w), 2)\n self.assertIsInstance(w[0].message, CacheKeyWarning)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # memcached limits key length to 250\n self.cache.set('a' * 251, 'value')\n self.assertEqual(len(w), 1)\n self.assertIsInstance(w[0].message, CacheKeyWarning)\n finally:\n self.cache._backend.key_func = old_func", "def key_error_check(key):\n\n msg = \"input for argument 'key' is not a valid DRE.94 key (reason: {})\"\n\n # Check that key is of type str (if key is represented as list or tuple, problems occur in encryption/decryption)\n if type(key) != str:\n raise TypeError(msg.format(\"DRE.94 key must be represented as a string (type str)\"))\n\n # Check for correct key length\n if len(key) != KEY_LENGTH:\n raise ValueError(msg.format(f\"DRE.94 key must be of length {KEY_LENGTH}\"))\n\n # Check for character uniqueness\n for ch in key:\n if key.count(ch) != 1:\n raise ValueError(msg.format(f\"DRE.94 key must contain only distinct characters\"))\n\n # Check that key uses KEY_CHARMAP characters (ASCII 33 to 126)\n for ch in key:\n if ch not in KEY_CHARSET:\n raise ValueError(msg.format(f\"DRE.94 key must contain only ASCII 
characters 33 to 126, inclusive\"))", "def is_invalid(self, key): # pragma: no cover\n\t\traise NotImplementedError", "def rekey(self,newkeyid, oldkeyid):\n self.blob = _encrypt(self._decrypt(self.blob, self.key), keyid)\n self.keyid = keyid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It raises an error when given an invalid new key.
def test_rekey_key_format(self): old_key = b'0' * 32 encrypted = encrypt('message', key=old_key) with pytest.raises(EncryptionError): rekey(encrypted, old_key=old_key, new_key=b'1' * 31)
[ "def is_invalid(self, key): # pragma: no cover\n\t\traise NotImplementedError", "def _newKey(self, key):\n pass", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def keyIssue(issue,key,keySet):\n raise ValidationError(\"%s key %s from %s\" % (issue,key,keySet))", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def key_error_check(key):\n\n msg = \"input for argument 'key' is not a valid DRE.94 key (reason: {})\"\n\n # Check that key is of type str (if key is represented as list or tuple, problems occur in encryption/decryption)\n if type(key) != str:\n raise TypeError(msg.format(\"DRE.94 key must be represented as a string (type str)\"))\n\n # Check for correct key length\n if len(key) != KEY_LENGTH:\n raise ValueError(msg.format(f\"DRE.94 key must be of length {KEY_LENGTH}\"))\n\n # Check for character uniqueness\n for ch in key:\n if key.count(ch) != 1:\n raise ValueError(msg.format(f\"DRE.94 key must contain only distinct characters\"))\n\n # Check that key uses KEY_CHARMAP characters (ASCII 33 to 126)\n for ch in key:\n if ch not in KEY_CHARSET:\n raise ValueError(msg.format(f\"DRE.94 key must contain only ASCII characters 33 to 126, inclusive\"))", "def validate_key(self, key):\n\t\treturn key", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def validate_keys(key):\n if key not in key_map:\n raise QueryParseError", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def test_invalid_keys(self):\n # mimic custom ``make_key`` method being defined since the default will\n # never show the below warnings\n def func(key, *args):\n return key\n\n old_func = self.cache._backend.key_func\n self.cache._backend.key_func = func\n\n try:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # memcached does not allow whitespace or control characters in\n # keys\n self.cache.set('key with spaces', 'value')\n self.assertEqual(len(w), 2)\n self.assertIsInstance(w[0].message, CacheKeyWarning)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # memcached limits key length to 250\n self.cache.set('a' * 251, 'value')\n self.assertEqual(len(w), 1)\n self.assertIsInstance(w[0].message, CacheKeyWarning)\n finally:\n self.cache._backend.key_func = old_func", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)", "def 
test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def ensure_key(self, key):\r\n if not self.check(key):\r\n self.create_key(key)\r\n return True\r\n return False", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_duplicate_key_identifier(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=list(bundle.keys)[0].key_identifier,\n key_tag=4711,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(ValueError):\n validate_signatures(bundle)", "def sanity_check_key(self,key):\n\t\tif not isinstance(key,str):\n\t\t\traise ValueError('Key should be of type string')", "def validate_key(self, *args, **kwargs):\n return self._call_with_fallback(\"validate_key\", *args, **kwargs)", "def test_put_new_key(self):\n response = requests.put(\n self.endpoint + '/' + self.unique_key(), json=self.test_value)\n assert \"key doesn't exist\" in response.text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if domain is marked sensitive
def is_domain_sensitive(name): query = database.session_query(Domain) query = query.filter(and_(Domain.sensitive, Domain.name == name)) return database.find_all(query, Domain, {}).all()
[ "def sensitive(self) -> Optional[bool]:\n return pulumi.get(self, \"sensitive\")", "def is_sensitive(self):\n return self._is_sensitive", "def is_domain_explicit(self):\n\n return len(self.domain) != 2", "def is_sensitive(self) -> Optional[bool]:\n return pulumi.get(self, \"is_sensitive\")", "def is_domain(self):\n return self._is_domain", "def verify_domain_identity(Domain=None):\n pass", "def domainAuthModeEnabled(self):\n return getattr(self, '_domain_auth_mode', None)", "def is_domain_public(self) -> bool:\n return self.current_domain.public", "def sensitive(self):\n return self._sensitive", "def is_domain_token_auth(self):\n return self.is_valid() and bool(self.domain_token)", "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def condition(self, vhost, openssl: str = None, ignore_openssl=False):\n\n return (\n self.__key not in vhost\n or \"Strict-Transport-Security\" not in vhost[self.__key]\n ) # vulnerable if True", "def is_shared_with_domain(self):\n return self.has_label(SHAREDWITHDOMAIN_LABEL)", "def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped", "def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False", "def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1", "def secure(self):\n return self._secure", "def confidential(self):\n return self._confidential", "def secure(self):\n return self.__secure" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update an existing domain
def update(domain_id, name, sensitive): domain = get(domain_id) domain.name = name domain.sensitive = sensitive database.update(domain)
[ "def domain_update(self, domain):\n\t\treturn protocol.Request_DOMAIN_UPDATE(domain=domain)", "def update_domain_entry(domainName=None, domainEntry=None):\n pass", "def test_update_domain(self):\n pass", "def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Check if domain.provider object exists to make sure\n # duplicate Provider.provider_url is not created\n provider = session.query(Provider).filter(\n Provider.provider_url == domain.provider.provider_url).first()\n if not provider:\n provider = Provider(\n provider_url=request.form[\"provider-url\"].strip())\n\n domain.category.category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"].strip()).first()\n\n domain.domain_name = parse_url(request.form[\"domain-name\"].strip())\n domain.ip = request.form[\"ip-address\"].strip()\n domain.provider.provider_url = parse_url(\n provider.provider_url.strip())\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Updated {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Problem with one of the fields.\".format(\n \"<strong>\", \"</strong>\")\n flash(message, \"danger\")\n return redirect(url_for(\"edit_domain\", domain_name=domain_name))\n\n if request.form[\"submit\"] == \"Save\":\n return redirect(url_for(\"view_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n return redirect(url_for(\"edit_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Obtain list of domain names without tuple to use\n # for domain_pager()\n domain_names = [d.domain_name for d in session.query(\n Domain.domain_name).order_by(Domain.domain_name).all()]\n next_domain, previous_domain = domain_pager(domain_name, domain_names)\n\n kwargs = {\n \"domain\": domain,\n \"domain_name\": domain_name,\n \"category_names\": category_names,\n \"next_domain\": next_domain,\n \"previous_domain\": previous_domain\n }\n return render_template(\"edit_domain.html\", **kwargs)", "def update_domain(self, index, domain):\n if isinstance(domain, Domain):\n self.__stack.update_domain(\n index,\n domain._get_raw()\n )\n logger.info(\n 'Updated domain {0} to {1}'.format(\n index,\n domain\n )\n )\n else:\n raise TypeError(\n 'Expected an instance of Domain'\n )", "def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)", "def test_update_domain_only(self):\n self.test_update()", "def refresh_domain(self):\n if self._domain.is_empty():\n return\n\n src_file = self._domain.get_source_file().get_path()\n\n try:\n self._domain = XMLDomainReader.extract_domain(src_file)\n self.change_settings(self._domain.get_settings())\n self.display_comment(\"Dialogue domain successfully updated\")\n except Exception as e:\n self.log.critical(\"Cannot refresh domain %s\" % e)\n self.display_comment(\"Syntax error: %s\" % e)\n self._domain = Domain()\n 
self._domain.set_source_file(src_file)", "def test_update_domain_admin_account(self):\n account = core_models.User.objects.get(username=\"admin@test.com\")\n url = reverse(\"v1:account-detail\", args=[account.pk])\n data = {\n \"username\": account.username,\n \"role\": account.role,\n \"password\": \"Toto1234\",\n \"mailbox\": {\n \"full_address\": account.mailbox.full_address,\n \"quota\": account.mailbox.quota\n },\n \"domains\": [\"test.com\", \"test2.com\"]\n }\n response = self.client.put(url, data, format=\"json\")\n self.assertEqual(response.status_code, 200)\n domains = models.Domain.objects.get_for_admin(account)\n self.assertEqual(domains.count(), 2)\n self.assertTrue(domains.filter(name=\"test2.com\").exists())\n\n data[\"domains\"] = [\"test2.com\"]\n response = self.client.put(url, data, format=\"json\")\n self.assertEqual(response.status_code, 200)\n domains = models.Domain.objects.get_for_admin(account)\n self.assertEqual(domains.count(), 1)\n self.assertTrue(domains.filter(name=\"test2.com\").exists())", "def update_domain_contact(DomainName=None, AdminContact=None, RegistrantContact=None, TechContact=None):\n pass", "def update(domain_name, username, password):\n logging.info(\"evaluating domain %s\", domain_name)\n\n public_address = get_public_address()\n logging.info(\"public IP address is %s\", public_address)\n\n domain_address = socket.gethostbyname(domain_name)\n logging.info(\"domain IP address is %s\", domain_address)\n\n if domain_address == public_address:\n logging.info(\"domain is up to date\")\n return False\n\n logging.info(\"IP address mismatch, will attempt update\")\n response = requests.post(\n url=UPDATE_URL,\n data={'hostname': domain_name, 'myip': public_address},\n auth=(username, password)\n )\n logging.info(\"response: %s %s\", response.status_code, response.reason)\n\n if response.status_code == 200:\n if response.text.startswith('good'):\n return True\n elif response.text.startswith('nochg'):\n return False\n else:\n raise OSError(f\"Update error: {response.text}\")\n else:\n raise OSError(f\"Request error: {response.reason}\")", "def change_the_Domain_for_ad_domain_and_click_Save(driver, ad_domain):\n global domain\n domain = ad_domain\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Domain\"]')\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').clear()\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').send_keys(ad_domain)\n assert wait_on_element(driver, 7, '//button[@ix-auto=\"button__SAVE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def test_update_project_domain_id(self):\n project = self.new_project_ref(domain_id=self.domain['id'])\n self.assignment_api.create_project(project['id'], project)\n project['domain_id'] = CONF.identity.default_domain_id\n r = self.patch('/projects/%(project_id)s' % {\n 'project_id': project['id']},\n body={'project': project},\n expected_status=exception.ValidationError.code)\n self.config_fixture.config(domain_id_immutable=False)\n project['domain_id'] = self.domain['id']\n r = self.patch('/projects/%(project_id)s' % {\n 'project_id': project['id']},\n body={'project': project})\n self.assertValidProjectResponse(r, project)", "def ModifyDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDomainResponse()\n model._deserialize(response[\"Response\"])\n 
return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def domain_update(self, domain, contact_info, raw=True, **kwargs):\n endpoint = '/Domain/Update'\n\n params = {\n 'Domain' : domain\n }\n\n params.update(contact_info)\n params.update(kwargs)\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response['product'][0]['status'] == 'SUCCESS'", "def update_domain_nameservers(DomainName=None, FIAuthKey=None, Nameservers=None):\n pass", "def set_domain_id(self,new_domain_id):\r\n self.__domain_id=new_domain_id", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def replace_domain(address, old_domain, new_domain):\n old_domain_pattern = r'' + old_domain + '$'\n address = re.sub(old_domain_pattern, new_domain, address)\n return address" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Establish a TCP connection to the indiserver via port 7624
def connect_to_indi(): indiclient=IndiClient() indiclient.setServer("localhost",7624) # Ensure the indiserver is running if (not(indiclient.connectServer())): print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run") print(" indiserver indi_sx_ccd") sys.exit(1) return indiclient
[ "def connect(self):\n self._ctrl_socket.connect(self._host, 8888)", "def open(self):\n try:\n srvaddr = (socket.TIPC_ADDR_NAME,\n self.port,\n 5,\n 0)\n self.handle = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)\n except Exception as e:\n logging.error('TIPC is not supported: ' + e.__str__())\n raise e\n try:\n self.handle.settimeout(None)\n self.handle.connect(srvaddr)\n except socket.error as e:\n message = 'Could not connect to TIPC:%d' % self.port\n raise TTransportException(type=TTransportException.NOT_OPEN, message=message)", "def start_online():\n Client(580, 580, ip='116.203.85.179', port=5081).start()", "def SCPI_sock_connect(ipaddress,port=5025):\n\n try:\n session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n #session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\n session.connect((ipaddress,port))\n except IOError:\n print( \"Failed to connect to the instrument, pleace check your IP address\" )\n return\n return session", "def _connect_to_server(self, host_ip, port):\n return 0", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def do_connect(self, args):\n\t\tself.ise.connect(self.ise_param['ipAddress'], self.ise_param['username'], self.ise_param['password'], Port=self.ise_param['port'], reqTimeout=10)\n\t\tself.prompt = 'connected> '\n\t\tself.connected = True", "def _connect(self):\n# print('DEBUG: enter comm._connect',file=sys.stderr)\n self._reset()\n self._do_readin = True # set False to kill _readthread\n self._readthread.start()\n# print('DEBUG:about to handshake', file=sys.stderr)\n self._call_when_connected()", "def open(self):\n self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.device.connect((self.host, self.port))\n\n if self.device is None:\n print \"Could not open socket for %s\" % self.host", "def connect(self):\n self._socket.connect((self._ip, self._port))", "def connect(self):\n self.conn.connect()", "def opensock(ipaddr,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((ipaddr,port))\n \n return s", "def connectToServer(self):\r\n\t\tself.rtspSocket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ttry:\r\n\t\t\tself.rtspSocket_client.connect((self.serverAddr, self.serverPort))\r\n\t\texcept:\r\n\t\t\tprint(\"Fail to connect to server\")", "def open_tcp_port():\n \n # Open an incoming tcp port to access the cluster endpoint\n try:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def _connect(self , cmd , data):\n md = {}\n if cmd is not None:\n md = dictFromCmd(cmd[2:])\n data = data[0]\n hostname = ''\n family = ''\n port = -1\n ip = ''\n if data:\n checkData(data , SMFIC_CONNECT)\n hostname , rem = readUntilNull(data[1:])\n family = rem[0]\n if family != SMFIA_UNKNOWN:\n port = unpack_uint16(rem[1:3])\n ip = rem[3:-1]\n return self.connect(hostname , family , ip , port , md)", "def connect(self):\n self.ipv4 = 
socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def open_device_connection(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (self.device_ip_address, 41795)\n self.sock.settimeout(2.0)\n self.sock.connect(server_address)\n return True\n except:\n #print (\"exception thrown:\", sys.exc_info()[0])\n pass\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connection routine for the CCD (given below in ccd variable). The following CCD properties are accessed. More can be found by going to indilib.org. CONNECTION Switch CCD_EXPOSURE Number CCD1 BLOB CCD_BINNING Number CCD_ABORT_EXPOSURE Number CCD_TEMPERATURE Number CCD_COOLER Switch CCD_FRAME_TYPE Switch
def connect_to_ccd(): ccd="SX CCD SXVR-H694" device_ccd=indiclient.getDevice(ccd) while not(device_ccd): time.sleep(0.5) device_ccd=indiclient.getDevice(ccd) print("Searching for device...") print("Found device") ccd_connect=device_ccd.getSwitch("CONNECTION") while not(ccd_connect): time.sleep(0.5) ccd_connect=device_ccd.getSwitch("CONNECTION") if not(device_ccd.isConnected()): ccd_connect[0].s=PyIndi.ISS_ON # the "CONNECT" switch ccd_connect[1].s=PyIndi.ISS_OFF # the "DISCONNECT" switch indiclient.sendNewSwitch(ccd_connect) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") while not(ccd_exposure): time.sleep(0.5) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") # inform the indi server that we want to receive the # "CCD1" blob from this device indiclient.setBLOBMode(PyIndi.B_ALSO, ccd, "CCD1") ccd_ccd1=device_ccd.getBLOB("CCD1") while not(ccd_ccd1): time.sleep(0.5) ccd_ccd1=device_ccd.getBLOB("CCD1") # get access to setting the CCD's binning value ccd_bin=device_ccd.getNumber("CCD_BINNING") while not(ccd_bin): time.sleep(0.5) ccd_bin=device_ccd.getNumber("CCD_BINNING") # get access to aborting the CCD's exposure ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") while not(ccd_abort): time.sleep(0.5) ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") # get access to the CCD's temperature value ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") while not(ccd_temp): time.sleep(0.5) ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") # get access to switching the CCD's cooler on/off ccd_cooler=device_ccd.getSwitch("CCD_COOLER") while not(ccd_cooler): time.sleep(0.5) ccd_cooler=device_ccd.getSwitch("CCD_COOLER") # get access to switching the CCD's image frame type ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") while not(ccd_frame): time.sleep(0.5) ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") return ccd_exposure, ccd_ccd1, ccd_bin, ccd_abort, ccd_temp, ccd_cooler, ccd_frame
[ "def ccd(self):\n self.spectrum = self.spectrum", "def get_ccd_info(self, handle):\n # 'CCD_INFO_IMAGING' will get firmware version, and a list of readout modes (binning)\n # with corresponding image widths, heights, gains and also physical pixel width, height.\n ccd_info_params0 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_IMAGING'])\n ccd_info_results0 = GetCCDInfoResults0()\n\n # 'CCD_INFO_EXTENDED' will get bad column info, and whether the CCD has ABG or not.\n ccd_info_params2 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED'])\n ccd_info_results2 = GetCCDInfoResults2()\n\n # 'CCD_INFO_EXTENDED2_IMAGING' will get info like full frame/frame transfer, interline or\n # not, presence of internal frame buffer, etc.\n ccd_info_params4 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED2_IMAGING'])\n ccd_info_results4 = GetCCDInfoResults4()\n\n # 'CCD_INFO_EXTENDED3' will get info like mechanical shutter or not, mono/colour,\n # Bayer/Truesense.\n ccd_info_params6 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED3'])\n ccd_info_results6 = GetCCDInfoResults6()\n\n with self._command_lock:\n self.set_handle(handle)\n self._send_command('CC_GET_CCD_INFO',\n params=ccd_info_params0,\n results=ccd_info_results0)\n self._send_command('CC_GET_CCD_INFO',\n params=ccd_info_params2,\n results=ccd_info_results2)\n self._send_command('CC_GET_CCD_INFO',\n params=ccd_info_params4,\n results=ccd_info_results4)\n self._send_command('CC_GET_CCD_INFO',\n params=ccd_info_params6,\n results=ccd_info_results6)\n\n # Now to convert all this ctypes stuff into Pythonic data structures.\n ccd_info = {'firmware version': self._bcd_to_string(ccd_info_results0.firmwareVersion),\n 'camera type': camera_types[ccd_info_results0.cameraType],\n 'camera name': str(ccd_info_results0.name, encoding='ascii'),\n 'bad columns': ccd_info_results2.columns[0:ccd_info_results2.badColumns],\n 'imaging ABG': bool(ccd_info_results2.imagingABG),\n 'serial number': str(ccd_info_results2.serialNumber, encoding='ascii'),\n 'frame transfer': bool(ccd_info_results4.capabilities_b0),\n 'electronic shutter': bool(ccd_info_results4.capabilities_b1),\n 'remote guide head support': bool(ccd_info_results4.capabilities_b2),\n 'Biorad TDI support': bool(ccd_info_results4.capabilities_b3),\n 'AO8': bool(ccd_info_results4.capabilities_b4),\n 'frame buffer': bool(ccd_info_results4.capabilities_b5),\n 'dump extra': ccd_info_results4.dumpExtra,\n 'STXL': bool(ccd_info_results6.camera_b0),\n 'mechanical shutter': not bool(ccd_info_results6.camera_b1),\n 'colour': bool(ccd_info_results6.ccd_b0),\n 'Truesense': bool(ccd_info_results6.ccd_b1)}\n\n readout_mode_info = self._parse_readout_info(\n ccd_info_results0.readoutInfo[0:ccd_info_results0.readoutModes])\n ccd_info['readout modes'] = readout_mode_info\n\n return ccd_info", "def setCcdMode(*argv):", "def test_CORDIC(self, dBw=12, cordBw=16, cordBwInt=19, stQnt=17,\n rndQnt=int(1e4)):\n cord = CORDIC(dBw, cordBw, stQnt, cordBwInt)\n tb = cord.test_rtl(randQnt=rndQnt)\n tb.config_sim(trace=False)\n tb.run_sim()", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = 
PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def continuous_capture_data(self, state ):\n self.continuous_capture_data = self.hwd.LP_ContCapture\n self.continuous_capture_data.restype = ctypes.c_int\n self.continuous_capture_data.argtypes = [ ctypes.c_int ]", "def cdi(self):\n from infapy.cdi import CDI\n infapy.log.info(\"Created the cdi object to access the iics cdi apis\")\n return CDI(self._v3,self._v2,self._v2BaseURL,self._v3BaseURL,self._v3SessionID,self._v2icSessionID)", "async def _raw_cdc_data(self) -> Dict[str, Any]:\n data = await self._request(\"get\", \"map/cdc\")\n return cast(Dict[str, Any], data)", "def _read_cardiochip(self):\n cur_leadstatus = 0\n sample_count =0\n while self.connected:\n sample_count+=1\n #check for sync bytes\n readbyte = ord(self.ser.read(1))\n #print readbyte, SYNC_BYTE\n if readbyte != SYNC_BYTE:\n continue\n readbyte = ord(self.ser.read(1))\n if readbyte != SYNC_BYTE:\n continue\n\n #parse length byte\n while True:\n pLength = ord(self.ser.read(1))\n if pLength != SYNC_BYTE:\n break\n if pLength > 169:\n continue\n #print \"L: %i\" % pLength\n\n # collect payload bytes\n payload = self.ser.read(pLength)\n payload = [ord(x) for x in payload] #convert to int from string\n #print \"payload: \" + str(payload).strip('[]')\n # ones complement inverse of 8-bit payload sum\n checksum = sum(payload) & 0xFF\n checksum = ~checksum & 0xFF\n\n # catch and verify checksum byte\n chk = ord(self.ser.read(1))\n #print \"chk: \" + str(checksum)\n if chk != checksum:\n print \"checksum error, %i != %i\" % (chk, checksum)\n continue\n\n output = self._parseData(payload)\n\n lead_status = next(( d for d in output if 'leadoff' in d), None)\n if lead_status is not None:\n if cur_leadstatus != lead_status['leadoff']:\n #we have a change\n if lead_status['leadoff']==200:\n print \"LEAD ON\"\n elif lead_status['leadoff']==0:\n print \"LEAD OFF\"\n cur_leadstatus = lead_status['leadoff']\n\n # store the output data in a queue\n # first, create a tuple with the sample index and dict with the timestamp and ecg\n ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)\n if ecgdict is not None and sample_count>self.Fs*2:\n #let's just ignore the first 2 seconds of crappy data\n ecgdict[1]['leadoff'] = cur_leadstatus\n #print ecgdict[1]\n 
self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys\n\n return", "def CCDpowerup(self):\n #starting drain voltages on CABAC\n drains = {\"OD\": 29, \"GD\": 24, \"RD\": 18}\n self.send_cabac_config(drains)\n\n time.sleep(1)\n\n #starting OG voltage on CABAC\n og = {\"OG\": 3.5}\n self.send_cabac_config(og)\n\n time.sleep(1)\n\n #sets clock rails\n dacs = {\"V_SL\": 0, \"V_SH\": 8.03, \"V_RGL\": 0, \"V_RGH\": 8.03, \"V_PL\": 0, \"V_PH\": 9.13}\n self.fpga.set_clock_voltages(dacs)\n\n time.sleep(1)\n\n #sets clock currents on CABAC\n iclock = {\"IC\": 255}\n self.send_cabac_config(iclock)\n\n time.sleep(1)\n\n #puts current on CS gate\n for stripe in self.stripes:\n self.fpga.set_current_source(0xfff, stripe)\n\n #rewrite default state of sequencer (to avoid reloading functions)\n self.fpga.send_function(0, self.seq.get_function(0))\n\n time.sleep(0.1)\n\n #now is the time to the backsubstrate voltage (elsewhere)\n print(\"CCD start-up sequence complete on REB, ready for Back Substrate.\")", "def configure_cdp(device):\r\n device.configure(['cdp'])", "def digitalDoSet(self, Startchannel_width=None, Data=0, Debug=0): \n # void DapiDOSet1(ULONG handle, ULONG ch, ULONG data)\n self.bib.DapiDOSet1.argtypes = [c_ulong, c_ulong, c_ulong]\n self.bib.DapiDOSet1.restype = None # void \n # void DapiDOSet8(ULONG handle, ULONG ch, ULONG data \n self.bib.DapiDOSet8.argtypes = [c_ulong, c_ulong, c_ulong]\n self.bib.DapiDOSet8.restype = None # void \n # void DapiDOSet16(ULONG handle, ULONG ch, ULONG data) \n self.bib.DapiDOSet16.argtypes = [c_ulong, c_ulong, c_ulong]\n self.bib.DapiDOSet16.restype = None # void \n # \n if Startchannel_width in range(0,16): # is form 0..15\n if Data in range(0,2): # 0 or 1 or False/ True\n if Debug == 0:\n self.bib.DapiDOGet1(self.handle, Startchannel_width, Data)\n return(0)\n else:\n self.bib.DapiDOSet1(self.handle, Startchannel_width, Data)\n return( print(\"Digital Outupt:\",Startchannel_width,\":\" ,Data) )\n else:\n return(1)\n \n elif Startchannel_width == \"Low\":\n if Data in range(0,256): # 0..255 bei 8 bit!\n Startchannel = 0\n #Startchannel = 0\n #Stopchannel = 7\n if Debug == 0:\n self.bib.DapiDOSet8(self.handle, Startchannel, Data)\n return(0)\n else:\n self.bib.DapiDOSet8(self.handle, Startchannel, Data)\n print( \"Digital OutByte\",Startchannel_width,\": {:08b}\".format(Data)) # beginnt bei MOS1 !!\n return(0)\n else:\n return(1)\n \n elif Startchannel_width == \"High\":\n if Data in range(0,256): # 0..255 bei 8 bit!\n Startchannel = 8\n #Startchannel = 8\n #Stopchannel = 15\n if Debug == 0:\n self.bib.DapiDOSet8(self.handle, Startchannel, Data)\n return(0)\n else:\n self.bib.DapiDOSet8(self.handle, Startchannel, Data)\n print( \"Digital OutByte\",Startchannel_width,\": {:08b}\".format(Data))\n return(0)\n else:\n return(1)\n \n elif Startchannel_width == None or Startchannel_width == \"All\":\n Startchannel = 0\n if Startchannel_width == None: \n Startchannel_width = 'RESET'\n #Startchannel = 0\n #Stopchannel = 15\n if Debug == 0:\n self.bib.DapiDOSet16(self.handle, Startchannel, Data)\n return(0)\n else:\n self.bib.DapiDOSet16(self.handle, Startchannel, Data)\n return( print( \"Digital OutWord\",Startchannel_width,\"{:016b}\".format(Data)) )\n else:\n print(\"No vaild digital input (DI) channel configuration..!\")\n self.lastError()\n self.lastErrorText()\n return(1)", "def control_change(self, channel, cc, value):\n knob, bank = self.decode_mpd218_cc(cc)\n log.debug(\"Winch control change %d on knob %d bank %d\", cc, knob, 
bank)\n\n if knob == 1: # Knob #1 on MPD218, use to control resonant frequency\n #self.frequency = 0.05 + 0.1 * value\n self.frequency = 5.00\n self.set_freq_damping()\n\n elif knob == 2: # Knob #2 on on MPD218, use to control damping ratio\n #self.damping_ratio = 0.05 + 0.01 * value\n self.damping_ratio = 1.32\n self.set_freq_damping()", "def _get_cbase(self):\n from PSCalib.CalibParsBasePnccdV1 import CalibParsBasePnccdV1\n return CalibParsBasePnccdV1()", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def init_comedi():\n \n ##\n ## Okay, now also load the sensor reading\n ##\n #open a comedi device\n conf[\"comedi.dev\"] =comedi.comedi_open(conf['COMEDI_DEVICE'])\n if not conf[\"comedi.dev\"]: raise Exception(\"Error opening Comedi device\") # This can happen if you do not have sufficient read privileges to /dev/comedi0, try running with sudo\n \n #get a file-descriptor for use later\n conf[\"comedi.fd\"] = comedi.comedi_fileno(conf[\"comedi.dev\"])\n if conf[\"comedi.fd\"]<=0: raise Exception(\"Error obtaining Comedi device file descriptor\")\n\n print(\"\\nCOMEDI loaded (for reading force sensors)\")\n \n conf[\"comedi.freq\"]=1000 # sampling frequency (Hz I hope)\n subdevice=0 # no idea\n conf[\"comedi.subdev\"]=subdevice\n\n NCHANNEL = conf['COMEDI_NCHANNEL'] # how many channels we are reading (TODO)\n\n #three lists containing the chans, gains and referencing\n #the lists must all have the same length\n chans = range(NCHANNEL);#[0,1,2,3]\n gains = [conf['COMEDI_RANGE']]*NCHANNEL #[0,0,0,0] # This defines the measurement range. This is device dependent, and we checked that this means -10 V to +10 V.\n aref = [comedi.AREF_GROUND]*NCHANNEL #, c.AREF_GROUND, c.AREF_GROUND, c.AREF_GROUND]\n nchans = NCHANNEL #len(chans) #number of channels\n\n #wrappers include a \"chanlist\" object (just an Unsigned Int array) for holding the chanlist information\n mylist = comedi.chanlist(nchans) #create a chanlist of length nchans\n\n #now pack the channel, gain and reference information into the chanlist object\n #N.B. 
the CR_PACK and other comedi macros are now python functions\n for index in range(nchans):\n mylist[index]=comedi.cr_pack(chans[index], gains[index], aref[index])\n\n conf[\"comedi.size\"] = comedi.comedi_get_buffer_size(conf[\"comedi.dev\"], subdevice)\n print(\"Comedi buffer size is %d\"% conf[\"comedi.size\"])\n conf[\"comedi.map\"] = mmap.mmap(conf[\"comedi.fd\"], conf[\"comedi.size\"], mmap.MAP_SHARED, mmap.PROT_READ)\n ##print(\"map = \", map)\n\n cmd = comedi.comedi_cmd_struct()\n cmd.chanlist = mylist # adjust for our particular context\n cmd.chanlist_len = nchans\n cmd.scan_end_arg = nchans\n prepare_cmd(conf[\"comedi.dev\"],subdevice,cmd,conf[\"comedi.freq\"],nchans,mylist)\n conf[\"comedi.cmd\"]=cmd\n\n\n ## Get the maximum data value -- we have checked that this is the same for all channels actually\n conf['comedi.maxdata'] = comedi.comedi_get_maxdata(conf['comedi.dev'],subdevice,0)\n\n ## The measurement range we use\n conf['comedi.range'] = comedi.comedi_get_range(conf['comedi.dev'],subdevice,0,conf[\"COMEDI_RANGE\"])\n #print([ (rng.min,rng.max) for rng in ranges])", "def connect_dmm2110():\n address = 'USB0::0x05E6::0x2110::8010814::INSTR'\n rm = visa.ResourceManager()\n return rm.open_resource(address)", "def setCSPadConfigurationFromOpenFile( self, h5file, dsname, event=0 ):\n if gm.CSpad2x2ElementIsInTheName(dsname) :\n print 'getCSpadConfiguration(...): This is a CSpad2x2Element. Special configuration is not required'\n self.isCSPad2x2 = True\n return\n\n self.h5file = h5file\n self.quadNumsInEvent = self.getQuadNumsInEvent( dsname, event )\n self.indPairsInQuads = self.getIndPairsInQuads( dsname )\n #self.printCSPadConfigPars()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the last numbered image in the current directory.
def last_image(fileDir): lastNum = 0 lastImg = '' # find the name and number of the last image in the current directory for f in os.listdir(fileDir): if os.path.isfile(os.path.join(fileDir, f)): file_name = os.path.splitext(f)[0] file_name2 = file_name[4:] try: file_num = int(file_name2) if file_num > lastNum: lastNum = file_num lastImg = os.path.join(fileDir, f) except ValueError: 'The file name "%s" is not an integer. Skipping' % file_name return lastNum, lastImg
[ "def get_last_counter():\n counter = imageNumStart\n if imageNumOn:\n image_ext = \".jpg\"\n search_str = imagePath + \"/*\" + image_ext\n file_prefix_len = len(imagePath + imageNamePrefix)+1\n try:\n # Scan image folder for most recent jpg file\n # and try to extract most recent number counter from file name\n newest = max(glob.iglob(search_str), key=os.path.getctime)\n count_str = newest[file_prefix_len:newest.find(image_ext)]\n print(\"%s INFO : Last Saved Image is %s Try to Convert %s\"\n % (get_now(), newest, count_str))\n counter = int(count_str)+1\n print(\"%s INFO : Next Image Counter is %i\"\n % (get_now(), counter))\n except:\n print(\"%s WARN : Restart Numbering at %i \"\n \"WARNING: Previous Files May be Over Written.\"\n % (get_now(), counter))\n return counter", "def _get_latest_inc(path):\n\n images = [os.path.join(path, image) for image in os.listdir(path) if '.png' in image]\n\n if not images:\n return 0\n else:\n return int(re.search('(?P<inc>\\d+).png$', max(images, key=os.path.getctime)).group('inc'))", "def last_photo(id):\n\t\n\tdirectorio = 'dataSet/images/p' + str(id)\n\tlista = os.listdir(directorio)\n\tnumero_arch = len(lista)\n\treturn numero_arch", "def _get_last_file_index(self, group_name):\n current_files = self._collect_revision_files(group_name)\n if current_files:\n name_pattern = self._build_rev_name_pattern(group_name=group_name)\n last_file_name = os.path.basename(current_files[-1])\n last_index_match = re.match(name_pattern, last_file_name)\n if last_index_match:\n return int(last_index_match.group(1))\n\n return 0", "def determine_output_ending():\n file_found = False\n idx = 1\n while not file_found:\n if not os.path.isfile(LOG_DIR + \"/output%04d.png\" % (idx)):\n return \"%04d\" % (idx)\n idx += 1", "def get_last_iteration(dirname):\n return sorted([int(x[3:]) for x in os.listdir(dirname) if x[:3] == 'td.' 
and x[-1].isdigit()])[-1]", "def read_last_iteration_number(directory):\n\tfile = open(directory + 'output.txt', 'r')\n\tfile = file.readlines()\n\tline = file[len(file) - 1]\n\treturn line.split()[0]", "def get_last_file(directory, index_file):\n if os.path.exists(os.path.join(directory, index_file)):\n f = open(os.path.join(directory, index_file))\n data = f.read()\n f.close()\n last_file = os.path.join(directory, data.split('\\n')[-2])\n else:\n last_file = ''\n return last_file", "def get_latest_iteration(path):\n glob = os.path.join(path, '{}_[0-9]*'.format(FILE_PREFIX))\n log_files = tf.io.gfile.glob(glob)\n\n if not log_files:\n raise ValueError('No log data found at {}'.format(path))\n\n def extract_iteration(x):\n return int(x[x.rfind('_') + 1:])\n\n latest_iteration = max(extract_iteration(x) for x in log_files)\n return latest_iteration", "def get_max_imgid(cursor: db.Cursor, table: str) -> int:\r\n res = cursor.execute(f\"SELECT MAX({cng.BBOX_DB_IMGRNR}) FROM {table}\")\r\n maxid: int = res.fetchall()[0][0]\r\n\r\n if maxid is None:\r\n return -1\r\n else:\r\n return maxid", "def last_capture():\n capture_files = list_captures()\n #print 'File: %s' % capture_files[-1]\n return capture_files[-1]", "def get_latest_image():\n return sqldb.get_latest_image()", "def _frame_number(self, image_path):\n image_path = image_path.replace('\\\\','/')\n return int(image_path.split('/')[-1].split('.jpg')[0])", "def last_modified_number(dir_name, glob):\n files = pathlib.Path(dir_name).glob(glob)\n files = sorted(files, key=lambda cp:cp.stat().st_mtime)\n\n if len(files) > 0:\n # Get number from filename\n regex = re.compile(r'\\d+')\n numbers = [int(x) for x in regex.findall(str(files[-1]))]\n assert len(numbers) == 1, \"Could not determine number from last modified file\"\n last = numbers[0]\n\n return last\n\n return None", "def get_latest_image_from_directory(self, motion_target_dir):\n try:\n # Use a glob generator to find the newest image\n return max(glob.iglob('{0}/*.jpg'.format(motion_target_dir)),\n key=os.path.getctime)\n except ValueError as e:\n # Raise an error if we did not find any images\n raise MotionAlertError(\"Could not find any images in motion \"\n \"target directory: \"\n \"{0}\".format(motion_target_dir))\n except OSError as e:\n # Raise an error if we cannot access the directory.\n raise MotionAlertError(\"Could not find the motion target dir: \"\n \"{0}\".format(e))", "def _find_thumbnail_path():\n directory = git_repository_root()\n if not directory:\n return None\n paths = directory.glob(\"**/thumbnail.png\")\n paths = [path.relative_to(directory) for path in paths]\n paths = sorted(paths, key=lambda x: (len(x.parents), x))\n if not paths:\n return None\n return paths[0].as_posix()", "def imageBuildNumber(image):\n try:\n return image.rsplit(':', 1)[1]\n except IndexError:\n return 0\n except AttributeError:\n return 0", "def get_output_number(dst):\n data = os.listdir(dst)\n print(data)\n if not data == []:\n last_record = sorted(data)[-1]\n print(last_record)\n hiphen_index = last_record.rfind(\"-\")\n print(hiphen_index)\n print(int(last_record[hiphen_index + 1:]))\n return int(last_record[hiphen_index + 1:])\n return 0", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > 
highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends an exposure command to the CCD given the type of frame and exposure time. The received BLOB is of FITS type and is
def exposure(frameType, expTime): blobEvent.clear() # set the specified frame type if frameType.lower() == 'light': ccd_frame[0].s = PyIndi.ISS_ON ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'bias': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_ON ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'dark': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_ON ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'flat': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_frame) # set the value for the next exposure ccd_exposure[0].value=expTime indiclient.sendNewNumber(ccd_exposure) # wait for the exposure blobEvent.wait() for blob in ccd_ccd1: # pyindi-client adds a getblobdata() method to IBLOB item # for accessing the contents of the blob, which is a bytearray in Python image_data=blob.getblobdata() # write the byte array out to a FITS file global imgNum global imgName imgNum += 1 fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits' f = open(fileName, 'wb') f.write(image_data) f.close() imgName = fileName return fileName
[ "def expose(self, cmd, expTime, expType, filename):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n # The expTime unit is ms.\n time.sleep((expTime / 1000.0) + self._exposureOverheadTime())\n\n # Command camera to do exposure sequence.\n p = sub.Popen(['rmodexposure', '-f', filename], stdout=sub.PIPE, stderr=sub.PIPE)\n output, errors = p.communicate()\n if (output == 'done'):\n cmd.inform('exposureState=\"done\"')\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def tcs_exposure_request(image_type, duration = 0, number = 1):\n\n\tvalid_types = ['THERMAL','DARK', 'BIAS', 'FLAT','OBJECT']\n\tvalid = image_type in valid_types\n\n\tif valid:\n\t\timage_type = image_type.lower()\n\t\tif image_type == 'dark':\n\t\t\timage_type = 'thermal'\n\n\t\tif number < 1:\n\t\t\tlogger.error('Invalid number of exposures requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tif duration <0:\n\t\t\tlogger.error('Invalid exposure time requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tcommand_str = 'expose ' + image_type\n\t\tif number != 1:\n\t\t\tcommand_str += ' '+str(number)\n\t\tif image_type != 'bias':\n\t\t\tcommand_str += ' ' + str(duration)\n\t\t\n\t\ttry:\n\t\t\ttcs_respond = send_command(command_str)\n\t\t\n\t\texcept:\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\telse:\n\t\t\t\n\t\t\tcam_temp = get_camera_status()[2]\n\t\t\t#if good_response and cam_temp>-20:\n\t\t\tif float(cam_temp)>-20:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_CCD_WARM\n\t\n\t\t\telse:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_OK\n\t\t\t\n\t\treturn respond\n\n\telse:\n\t\tlogger.error('Invalid image type provided to exposure request '+str(\n\t\t\t\timage_type))\n\t\tprint('Invalid image type provided to exposure request'+str(\n\t\t\timage_type))", "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def start_exposure(self, exp_time, rows, cols, rowbin, colbin, rowcen, colcen):\n self.nRows = rows\n self.nCols = cols\n self.expTime = exp_time\n self.rowBin = rowbin\n self.colBin = colbin\n print(\"Calling startExposure with {} {} {} {} {} {} {}\".\n format(exp_time, \n rows, cols, \n rowbin, colbin, \n rowcen, colcen))\n self.client.startExposure(exp_time, \n rows, cols, \n rowbin, colbin, \n rowcen, colcen)", "def getImage(self, exposure=None, binsize=None, **kwargs) -> np.ndarray:\n if exposure is None:\n exposure = self.default_exposure\n if not binsize:\n binsize = self.default_binsize\n\n # Upload exposure settings (Note: will do nothing if no change in settings)\n self.conn.set_detector_config(ExposureTime=exposure,\n TriggerPeriod=exposure + 0.00050001)\n\n # Check if measurement is running. If not: start\n db = self.conn.dashboard\n if db['Measurement'] is None or db['Measurement']['Status'] != 'DA_RECORDING':\n self.conn.measurement_start()\n\n # Start the acquisition\n self.conn.trigger_start()\n\n # Request a frame. 
Will be streamed *after* the exposure finishes\n img = self.conn.get_image_stream(nTriggers = 1, disable_tqdm = True)[0]\n arr = np.array(img)\n return arr", "async def integrate(self, exposure_time=1):\n if not self.status == ControllerStatus.IDLE:\n raise ArchonError(\"Status must be IDLE to start integrating.\")\n\n await self.set_param(\"IntMS\", int(exposure_time * 1000))\n await self.set_param(\"Exposures\", 1)\n\n self.status = ControllerStatus.EXPOSING", "def set_exposure_time(self, exposure_ms):\n exposure_ms_double = ctypes.c_double(exposure_ms)\n nRet = ueye.is_Exposure(\n self.hCam,\n ueye.IS_EXPOSURE_CMD_SET_EXPOSURE,\n exposure_ms_double,\n ctypes.sizeof(exposure_ms_double),\n )\n if nRet != ueye.IS_SUCCESS:\n raise RuntimeError(\"IS_EXPOSURE_CMD_SET_EXPOSURE failed\")\n actual = exposure_ms_double.value\n if actual != exposure_ms:\n print(\"Warning: actual value of exposure time is\", actual, \"ms\")", "def singleShot(self, newExpos=None):;\n\n\t\t#To put camera in the \"camera mode\"\n\t\tself.detector.setCameraInProgress(False);\n\n\t\tif self.getStatus() != Detector.IDLE:\n\t\t\tprint 'Camera not available, please try later';\n\t\t\treturn;\n\n\t\tif newExpos is not None: # New exposure time given\n\t\t\tself.exposureTime = newExpos;\n\n\t\tself.setCollectionTime(self.exposureTime);\n\n\t\tself.detector.setCameraSequentialMode(True);\n\t\t#self.setNumOfImages(1);\n\t\tself.collectData();\n\n\t\tsleep(self.exposureTime);\n\t\twhile self.getStatus() != Detector.IDLE:\n\t\t\tsleep(self.exposureTime/10.0);\n\n\t\tif self.protection:\n\t\t\tself.protectCamera();\n\n\t\treturn self.readout();", "def factor_exposure(asset: Asset, risk_model_id: str, factor_name: str, *,\n source: str = None, real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n risk_model = RiskModel(risk_model_id)\n factor = Factor(risk_model_id, factor_name)\n if factor.factor is None or risk_model_id != factor.risk_model_id:\n raise MqValueError('Requested factor not available in requested risk model')\n\n asset_gsid = asset.get_identifiers().get('GSID')\n\n # Establish date interval for data query\n dates = risk_model.get_dates()\n start_date = dt.datetime.strptime(min(dates), \"%Y-%m-%d\").date() if dates else None\n end_date = dt.datetime.strptime(max(dates), \"%Y-%m-%d\").date() if dates else None\n\n # Query data and append pull requested factor exposure\n all_exposures = []\n query_results = risk_model.get_data(\n measures=[Measure.Factor_Name, Measure.Universe_Factor_Exposure, Measure.Asset_Universe],\n start_date=start_date,\n end_date=end_date,\n assets=DataAssetsRequest(identifier=AssetUniverseIdentifier.gsid, universe=[asset_gsid])).get('results', [])\n for result in query_results:\n if result.get('date') in dates:\n exposures = result.get('assetData', {}).get('factorExposure', [])\n if exposures:\n all_exposures.append(\n {'date': result['date'],\n 'factorExposure': exposures[0].get(factor.factor.identifier)})\n\n # Create and return timeseries\n df = pd.DataFrame(all_exposures)\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.FACTOR_EXPOSURE)", "def set_exposure_time(self, exposure_time):\n\n if exposure_time <= 4e-3:\n time = int(exposure_time * 1e9)\n timebase = 'ns'\n\n elif exposure_time <= 4:\n time = int(exposure_time * 1e6)\n timebase = 'us'\n\n elif exposure_time > 4:\n time = int(exposure_time * 1e3)\n timebase = 'ms'\n\n else:\n raise\n\n self.sdk.set_delay_exposure_time(0, 'ms', time, 
timebase)", "def set_exposure_time(self, seconds):\n self.camera.ExposureMode = \"Timed\"\n self.camera.ExposureTime = seconds * 1e6", "def setExposure(value, cam='/dev/video0'):\n\t\ttry:\n\t\t\tsubprocess.call( 'uvcdynctrl -d $(realpath {0}) -s \"Exposure, Auto\" 1'.format(cam), shell=True)\n\t\t\tsubprocess.call( 'uvcdynctrl -d $(realpath {0}) -s \"Exposure, Auto Priority\" 0'.format(cam), shell=True)\n\t\t\tsubprocess.call( 'uvcdynctrl -d $(realpath {0}) -s \"Exposure (Absolute)\" {1}'.format(cam, value), shell=True)\n\t\t\tprint('Set exposure of {0} to {1}'.format(cam, value))\n\t\texcept (subprocess.CalledProcessError) as e:\n\t\t\tprint('Failed to set exposure of {0} to {1}:\\n{2}'.format(cam, value, e.output))", "def create_exposure(event_class,event_type,egy,cth):\n\n if isinstance(event_type,int):\n event_type = evtype_string[event_type]\n \n irf_factory=pyIrfLoader.IrfsFactory.instance()\n irf = irf_factory.create('%s::%s'%(event_class,event_type))\n\n irf.aeff().setPhiDependence(False)\n \n theta = np.degrees(np.arccos(cth))\n \n # Exposure Matrix\n # Dimensions are Etrue and incidence angle\n m = np.zeros((len(egy),len(cth)))\n\n for i, x in enumerate(egy):\n for j, y in enumerate(theta): \n m[i,j] = irf.aeff().value(10**x,y,0.0)\n\n return m", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n 
noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def select_exposure(self):\n exp1_selected = self.exp1_radio.isChecked()\n\n if self.recording_sequence:\n self.record_sequence() # stop current recording\n\n if exp1_selected: # then exp1\n ifi_ndx = self.exp1_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp1_select.currentIndex(), ifi_ndx)\n else:\n ifi_ndx = self.exp2_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp2_select.currentIndex(), ifi_ndx)\n\n temp = list(self.dpar.iwindow_toggle_save)\n self.dpar.iwindow_toggle_save = list(self.dpar.iwindow[0])\n self.dpar.iwindow[0] = temp\n self._update_scrollbars()\n\n self.rec_seq_button.setEnabled(ifi_ndx > 0)\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def pollCamera(self):\n expState = self.camera.getExposureState()\n if expState.state == arctic.Reading and not self.readingFlag:\n\n self.elapsedTime = time.time() - self.expStartTime\n\n \n #moved dark time here... elapsed time for dark time matters and should not care about pausing (calculated below)\n self.darkTime = self.elapsedTime #according to russets emails it seems like dark time is really j ust full exposure time as I calculated it.\n\n #PR 1911 fix, handling special exceptions to exposure\n if self.wasPaused:\n self.elapsedTime = self.elapsedTime - self.expTimeTotalPause\n #let use see what kind of exposure this was. \n #if it is a bias, then this expTime is really darktime. and expTime is 0\n #if it is a dark then dark time should be the full exposure time\n # HANDLE DARK AND BIAS TIMES\n\n \n if self.wasStoppedAborted: #if the user aborts the exposure or stops it, then we want to get what the real exposure time was.\n self.expTime = self.elapsedTime #over write the requested expTime with this time\n\n \n\n self.expTimeTotalPause = 0\n self.readingFlag = True\n self.writeToUsers(\"i\", \"shutter=closed\") # fake shutter\n # self.startReadTime = time.time()\n self.writeToUsers(\"i\", self.exposureStateKW, self.exposeCmd)\n \n if expState.state == arctic.ImageRead:\n log.info(\"saving image: exposure %s\"%self.expName)\n self.camera.saveImage() # saveImage sets camera exp state to idle\n # write headers\n self.writeHeaders()\n # clean up\n log.info(\"exposure %s complete\"%self.expName)\n self.exposeCleanup()\n elif expState.state == arctic.Idle:\n log.warn(\"pollCamera() called but exposure state is idle. 
Should be harmless, but why did it happen?\")\n self.exposeCleanup()\n else:\n # if the camera is not idle continue polling\n self.pollTimer.start(0.05, self.pollCamera)", "def store_frame(frame_id, td, conn, exp, verbose=False, compressFrame=True):\r\n # Get a projected mass spectrum:\r\n q = conn.execute(\"SELECT NumScans, Time, Polarity, MsMsType FROM Frames WHERE Id={0}\".format(frame_id))\r\n tmp = q.fetchone()\r\n num_scans = tmp[0]\r\n time = tmp[1]\r\n pol = tmp[2]\r\n msms = int(tmp[3])\r\n\r\n center = -1\r\n width = -1\r\n mslevel = 1\r\n if msms == 2:\r\n q = conn.execute(\"SELECT TriggerMass, IsolationWidth, PrecursorCharge, CollisionEnergy FROM FrameMsMsInfo WHERE Frame={0}\".format(frame_id))\r\n tmp = q.fetchone()\r\n center = float(tmp[0])\r\n width = float(tmp[1])\r\n mslevel = 2\r\n\r\n if verbose:\r\n print \"mslevel\", mslevel, msms\r\n\r\n # Get the mapping of the ion mobility axis\r\n scan_number_axis = np.arange(num_scans, dtype=np.float64)\r\n ook0_axis = td.scanNumToOneOverK0(frame_id, scan_number_axis)\r\n\r\n allmz = []\r\n allint = []\r\n allim = []\r\n\r\n # Traverse in reversed order to get low ion mobilities first\r\n for k, scan in reversed(list(enumerate(td.readScans(frame_id, 0, num_scans)))):\r\n index = np.array(scan[0], dtype=np.float64)\r\n mz = td.indexToMz(frame_id, index)\r\n intens = scan[1]\r\n drift_time = ook0_axis [k] \r\n if compressFrame:\r\n allmz.append(mz)\r\n allint.append(intens)\r\n allim.append([drift_time for k in mz])\r\n continue\r\n\r\n # Store data in OpenMS Spectrum file -> each TOF push is an individual\r\n # spectrum and we store the ion mobility in the precursor. The frame\r\n # can be reconstructed by grouping all spectra with the same RT.\r\n s = pyopenms.MSSpectrum()\r\n s.setMSLevel(mslevel)\r\n s.set_peaks( (mz, intens) ) \r\n s.setRT(time)\r\n p = pyopenms.Precursor()\r\n p.setDriftTime(drift_time)\r\n if msms == 2:\r\n p.setMZ(center)\r\n p.setIsolationWindowUpperOffset(width / 2.0)\r\n p.setIsolationWindowLowerOffset(width / 2.0)\r\n s.setPrecursors([p])\r\n exp.consumeSpectrum(s)\r\n\r\n\r\n if compressFrame:\r\n mz = np.concatenate(allmz)\r\n intens = np.concatenate(allint)\r\n ims = np.concatenate(allim)\r\n # print \" leeen\", len(mz), len(intens)\r\n\r\n fda = pyopenms.FloatDataArray()\r\n fda.setName(\"Ion Mobility\")\r\n fda.resize(len(mz))\r\n for k,val in enumerate(ims):\r\n fda[k] = val\r\n\r\n sframe = pyopenms.MSSpectrum()\r\n sframe.setMSLevel(mslevel)\r\n sframe.setRT(time)\r\n sframe.setFloatDataArrays([fda])\r\n p = pyopenms.Precursor()\r\n if msms == 2:\r\n p.setMZ(center)\r\n p.setIsolationWindowUpperOffset(width / 2.0)\r\n p.setIsolationWindowLowerOffset(width / 2.0)\r\n sframe.setPrecursors([p])\r\n sframe.set_peaks( (mz, intens) )\r\n sframe.sortByPosition()\r\n exp.consumeSpectrum(sframe)", "def factor_exposure(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_EXPOSURE)", "def exposure_time(vmag, counts, iodine=False, t1=110., v1=8., exp1=250., iodine_factor=0.7):\n\n # flux star / flux 8th mag star\n fluxfactor = 10.0**(-0.4*(vmag-v1)) \n exptime = t1/fluxfactor \n exptime *= counts/exp1\n if iodine == False:\n exptime *= iodine_factor\n return exptime" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the method that receives the client's data and decides what to do with it. It runs in a loop to always be accepting new connections. If the data is 'status', the CCD status is returned. If the data is 'stop', the current exposure is stopped. If the data is anything else, a new thread is created and the data is sent to handle_command().
async def handle_client(reader, writer):
    request = None
    # loop to continually handle incoming data
    while request != 'quit':
        request = (await reader.read(255)).decode('utf8')
        print(request.encode('utf8'))
        #log.info('COMMAND = '+request)
        writer.write(('COMMAND = '+request.upper()+'\n').encode('utf8'))

        response = 'BAD'
        # check if data is empty, a status query, or potential command
        dataDec = request
        if dataDec == '':
            break
        elif 'status' in dataDec.lower():
            response = 'OK'
            # check if the command thread is running
            try:
                if exposureState() > 0:
                    response = response + '\nBUSY'
                else:
                    response = response + '\nIDLE'
            except:
                response = response + '\nIDLE'

            if ccd_frame[0].s == PyIndi.ISS_ON:
                frameType = 'LIGHT'
            elif ccd_frame[1].s == PyIndi.ISS_ON:
                frameType = 'BIAS'
            elif ccd_frame[2].s == PyIndi.ISS_ON:
                frameType = 'DARK'
            elif ccd_frame[3].s == PyIndi.ISS_ON:
                frameType = 'FLAT'

            response = response+\
                '\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\
                '\nCCD TEMP = '+str(ccd_temp[0].value)+\
                'C\nLAST FRAME TYPE = '+str(frameType)+\
                '\nFILE DIR = '+str(fileDir)+\
                '\nLAST IMAGE = '+str(imgName)

            # send current status to open connection & log it
            #log.info('RESPONSE: '+response)
            writer.write((response+'\nDONE\n').encode('utf-8'))
        elif 'stop' in dataDec.lower():
            # check if the command thread is running
            try:
                if comThread.is_alive():
                    response = 'OK: aborting exposure'
                    ccd_abort[0].s = PyIndi.ISS_ON
                    indiclient.sendNewSwitch(ccd_abort)
                    blobEvent.set()  # Ends the currently running thread.
                    response = response+'\nExposure Aborted'
                else:
                    response = 'OK: idle'
            except:
                response = 'OK: idle'

            # send current status to open connection & log it
            #log.info('RESPONSE = '+response)
            writer.write((response+'\nDONE\n').encode('utf-8'))
        else:
            # check if the command thread is running, may fail if not created yet, hence try/except
            try:
                if comThread.is_alive():
                    response = 'BAD: busy'
                    # send current status to open connection & log it
                    #log.info('RESPONSE = '+response)
                    writer.write((response+'\nDONE\n').encode('utf-8'))
                else:
                    # create a new thread for the command
                    comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,))
                    comThread.start()
            except:
                # create a new thread for the command
                comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,))
                comThread.start()

    await writer.drain()
    writer.close()
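The coroutine above uses asyncio's streams API; a minimal sketch of how such a handler is typically wired into a listening server (the host, port and event-loop setup are assumptions, not taken from this row):

import asyncio

async def main():
    # handle_client(reader, writer) is invoked once per incoming connection
    server = await asyncio.start_server(handle_client, '127.0.0.1', 9999)
    async with server:
        await server.serve_forever()

asyncio.run(main())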
[ "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n self.recieved_string = \"\"\n\n print \"Client connected at \" + self.ip + \":\" + str(self.port)\n\n # Loop that listens for messages from the client\n while True:\n self.received_string = self.connection.recv(4096).strip()\n print self.recieved_string\n self.process(self.received_string)\n # TODO: Add handling of received payload from client", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def main(self):\n while True:\n if not self.data_server_command.empty():\n command_data_server = self.data_server_command.get()\n if command_data_server[0] == 4:\n thread.start_new_thread(self.get_file, (command_data_server[1],))\n else:\n self.data_server_command_def[command_data_server[0]](command_data_server[1])", "def run(self):\n while True:\n try:\n (size, msg) = self.__recvFromVPICC()\n except socket.error as e:\n if not self.host:\n logging.info(\"Waiting for vpcd on port \" + str(self.port))\n (self.sock, address) = self.server_sock.accept()\n continue\n else:\n sys.exit()\n\n if not size:\n logging.warning(\"Error in communication protocol (missing \\\n size parameter)\")\n elif size == VPCD_CTRL_LEN:\n if msg == chr(VPCD_CTRL_OFF):\n logging.info(\"Power Down\")\n logging.info(\"-\" * 70)\n self.os.powerDown()\n elif msg == chr(VPCD_CTRL_ON):\n logging.info(\"Power Up\")\n self.os.powerUp()\n elif msg == chr(VPCD_CTRL_RESET):\n logging.info(\"Reset\")\n self.os.reset()\n elif msg == chr(VPCD_CTRL_ATR):\n #logging.info(\"ATR\")\n msg = self.os.getATR()\n #logging.info(\"\\nATR (%d bytes):\\n%s\", len(msg), hexdump(msg))\n self.__sendToVPICC(msg)\n else:\n logging.warning(\"unknown control command\")\n else:\n if size != len(msg):\n logging.warning(\"Expected %u bytes, but received only %u\",\n size, len(msg))\n\n answer = self.attack.user_execute(msg)\n self.__sendToVPICC(answer)", "def 
handle_data(self):\n\n server = jj2server(self.key)\n new = True # server is always new when connection is opened\n self.client.settimeout(10) # time out in 10 seconds unless further data is received\n self.ls.log.info(\"Server connected from %s\" % self.key)\n broadcast = False\n\n # keep connection open until server disconnects (or times out)\n while self.looping:\n try:\n data = self.client.recv(1024)\n except (socket.timeout, TimeoutError):\n # if no lifesign for 30 seconds, ping to see if the server is still alive\n data = None\n try:\n ping = self.client.send(bytearray([0]))\n except (socket.timeout, TimeoutError, ConnectionError) as e:\n self.ls.log.info(\"Server %s did not respond to ping (%s), delisting\" % (repr(e), self.key))\n break\n if ping == 1:\n self.ls.log.info(\"Ping from server %s\" % self.key)\n server.update_lifesign()\n else:\n self.ls.log.info(\"Server from %s timed out\" % self.key)\n break\n except ConnectionError as e:\n self.ls.log.info(\"Server %s closed: connection error (%s)\" % (self.key, e))\n break\n\n if banned(self.ip):\n self.ls.log.warning(\"Delisting server from banned IP %s\" % self.ip)\n break\n\n # new server wants to get listed\n if new and data and len(data) == 42:\n # check for spamming\n other = fetch_one(\"SELECT COUNT(*) FROM servers WHERE ip = ?\", (self.ip,))[0]\n if other >= config.MAXSERVERS and not whitelisted(self.ip):\n self.ls.log.warning(\"IP %s attempted to list server, but has 2 listed servers already\" % self.ip)\n self.error_msg(\"Too many connections from this IP address\")\n break\n\n self.ls.log.info(\"Server listed from %s\" % self.key)\n self.client.settimeout(32) # should have some form of communication every 30 seconds, with some leeway\n\n new = False\n\n port = int.from_bytes(data[0:2], byteorder=\"little\")\n exists = fetch_one(\"SELECT COUNT(*) FROM servers WHERE ip = ? 
AND port = ?\", (self.ip, port))[0]\n if exists > 0:\n self.ls.log.warning(\"Server %s tried to connect on port %s, but port already in use; refusing\" % (self.ip, port))\n self.error_msg(\"Reconnecting too fast: please wait a few seconds before relisting\")\n break\n\n name = server.validate_name(data[2:32].decode(\"ascii\", \"ignore\"), self.ip,\n \"Server on %s\" % self.ip)\n\n players = int(data[35])\n max_players = int(data[36])\n flags = int(data[37])\n version = data[38:]\n\n mode = (flags >> 1) & 31\n\n server.set(\"name\", name)\n server.set(\"private\", flags & 1)\n server.set(\"plusonly\", flags & 128)\n server.set(\"ip\", self.ip)\n server.set(\"port\", port)\n server.set(\"players\", players)\n server.set(\"max\", max_players)\n server.set(\"mode\", decode_mode(mode))\n server.set(\"version\", decode_version(version))\n server.set(\"origin\", self.ls.address)\n\n broadcast = True\n\n # existing server sending an update\n elif not new and data and (len(data) == 2 or data[0] == 0x02):\n broadcast = True\n if data[0] == 0:\n if server.get(\"players\") != data[1]:\n self.ls.log.info(\"Updating player count for server %s\" % self.key)\n server.set(\"players\", data[1])\n else:\n self.ls.log.info(\"Received ping from server %s\" % self.key)\n server.update_lifesign()\n elif data[0] == 0x01:\n self.ls.log.info(\"Updating game mode for server %s\" % self.key)\n server.set(\"mode\", decode_mode(int(data[1])))\n elif data[0] == 0x02:\n self.ls.log.info(\"Updating server name for server %s\" % self.key)\n name = server.validate_name(data[1:33].decode(\"ascii\", \"ignore\"), self.ip,\n \"Server on %s\" % self.ip)\n server.set(\"name\", name)\n elif data[0] == 0x03:\n self.ls.log.info(\"Updating max players for server %s\" % self.key)\n server.set(\"max\", data[1])\n elif data[0] == 0x04:\n self.ls.log.info(\"Updating public/private for server %s\" % self.key)\n server.set(\"private\", data[1] & 1)\n elif data[0] == 0x05:\n self.ls.log.info(\"Updating plusonly for server %s\" % self.key)\n server.set(\"plusonly\", data[1] & 1)\n\n # server wants to be delisted, goes offline or sends strange data\n else:\n if not new:\n if data is not None and ( len(data) == 0 or (data[0] == 0x00 and len(data) > 16) ):\n # this usually means the server has closed\n if len(data) > 16:\n self.ls.log.info(\"Server sent goodbye: %s\" % repr(data))\n self.ls.log.info(\"Server from %s closed; delisting\" % self.key)\n break\n elif data is not None:\n self.ls.log.info(\"Received empty data from server %s (%s), ignoring\" % (self.key, repr(data)))\n else:\n self.ls.log.warning(\"Server from %s provided faulty listing data: not listed\" % self.key)\n self.error_msg(\"Invalid data received\")\n break\n\n # broadcast updates to connected mirrors\n if broadcast:\n self.ls.broadcast(action=\"server\", data=[server.flush_updates()])\n\n time.sleep(config.MICROSLEEP)\n\n # server presumed dead, remove from database\n server.forget()\n\n # make sure mirrors also delist the server\n self.ls.broadcast(action=\"delist\", data=[server.data])\n\n self.end()", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request 
received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def listen(self):\n\n # Check for new data\n # If new data is available:\n # Enqueue the command and proceed\n\n # Since the socket is non-blocking, handle the\n # \"Resource temporarily unavailable\" exception.\n try:\n raw_data = self.rx_socket.recv(4096)\n\n except socket.error as err:\n if err.args[0] == 11:\n return\n # If the exception was not the \"Resource temporarily unavailable\"\n # then something bad happened and should be reported.\n print \"Fatal Exception: \",\n print err.args[1]\n exit(1)\n\n except AttributeError as err:\n # The quit command causes this exection to be thrown.\n exit(0)\n\n # To get here, data must have been recieved. Pre-check it and\n # enqueue if necessary.\n data = raw_data.lower().split()\n cmd = data[0]\n\n # If command is no-argument, then args will be the empty list.\n args = data[1:]\n\n if cmd in self.cb_funcs:\n self.enqueue_cmd( (cmd, args) )", "def receiveCommand(data):\n self.clientData.append('Received Command')\n a = {}\n a['data'] = data\n a['messageType'] = 'Command'\n a['widget'] = 'NLP'\n self.data.append(a)\n print(printHeader('FFBOLab Client NLP') + \"Received a command.\")\n self.tryComms(a)\n return True", "def listen_to_connection(self, conn):\n with conn:\n print(\"Connected\")\n while self.running:\n data = conn.recv(32)\n if not data: \n return\n \n print(\"Recived Data:\"+str(data))\n self.__update(data)", "def run(self):\n while self._running:\n try:\n msg = self.ser.readline()\n if msg != None and len(msg) > 0:\n self.__send_command(msg)\n\n # If time is out\n except TimeoutError:\n print('No data sent for ' + str(elf.timeout) + 'seconds...')\n except Exception as err:\n print(\"COMMANDS THREAD EXCEPTION\")\n print(err)\n print(msg)\n print(\"END OF COMMANDS THREAD EXCEPTION\")", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. 
or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()", "def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)", "def guacd_listener(self):\n while True:\n instruction = self.client.receive()\n self.ws.send(instruction)", "def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)", "def on_eth_data(self, data):\n global g_running\n if len(data) == 0:\n self.zeroes = self.zeroes + 1\n if self.zeroes > 100000:\n out(\"WARNING: wild loop, eth rx\")\n sys.exit(1) # consider this a wild loop, kill it off\n return\n self.zeroes = 0\n if self.type == CLIENT_CTRL:\n strdata = str(data, 'ascii')\n self.cmd = self.cmd + strdata\n if strdata.endswith(\"\\n\"):\n cmds = self.cmd.split(\"\\n\")\n for c in cmds:\n if len(c.strip()) == 0:\n continue\n try:\n self.on_command(c)\n except serial.serialutil.SerialException as e:\n self.error(\"serial:{}\".format(str(e)))\n except:\n self.error(\"unknown:{}\".format(sys.exc_info()[0]))\n traceback.print_exc()\n self.cmd = \"\"\n\n elif self.type == CLIENT_DATA_RXTX:\n if self.ctrl_client.uart != None:\n lldbg(\" eth{:s}<-{:s}\".format(self.ctrl_client.uart.name, str(data)))\n self.ctrl_client.uart.q_eth2ser.put(data)", "def handle_client_msg(self, msg):\n command = msg.split()\n code = int(command[0])\n input_msg = 'Enter your command: '\n\n #if self.is_not_included_in_options(command):\n # input_msg = '(failure on last attempt)--Enter your command: '\n if code == client.EXIT:\n self.send_prints(msg)\n time.sleep(1)\n self.socket.close()\n sys.exit()\n elif code == client.SELECT_ALL:\n #send info\n self.send_prints(msg)\n data = self.socket.recv(1024).decode('utf-8')\n data = self.clean_data(data)\n #printing\n self.print_received_table(data)\n\n elif code == client.SELECT_ONE:\n #send info\n self.send_prints(msg)\n data = self.socket.recv(1024).decode('utf-8')\n data = self.clean_data(data)\n # printing\n self.print_received_table(data)\n\n elif code == client.DEPOSIT:\n code,id,value = command\n if int(value) <= 0 :\n print(\"Deposit should not be negative!\")\n elif int(value) % 5 != 0:\n print(\"Deposit should be multiple of 5!\")\n else:\n self.send_prints(msg)\n data = self.socket.recv(1024).decode('utf-8')\n self.print_received_msg(data)\n\n elif code == client.WITHDRAWAL:\n code, id, value = command\n if int(value) <= 0 :\n print(\"withdrawal should not be negative!\")\n elif self.check_if_50s_and_20s( int(value)):\n print(\"ATM returns only 50s and 20s\")\n else:\n 
self.send_prints(msg)\n data = self.socket.recv(1024).decode('utf-8')\n self.print_received_msg(data)\n\n else:\n # send info\n self.send_prints(msg)\n data = self.socket.recv(1024).decode('utf-8')\n self.print_received_msg(data)\n return input_msg", "def _recvdata(self):\n try:\n resp_data = ''\n server_response = ServerResponse(self._req_type)\n while True:\n # TODO: Limit the bufsize based on _req_type?\n recv_str, address = self._client.recvfrom(65538)\n if recv_str == '':\n raise RuntimeError(\"socket connection broken\")\n server_response.append(recv_str)\n if not server_response.isvalid:\n # TODO: 3.2.5.4 Waiting Completed\n # Only CLNT_BCAST_EX ignores invalid messages\n # if not self._req_type == CLNT_BCAST_EX:\n # raise SomeError(...)\n break\n if server_response.iscomplete:\n if self._reader_callback is not None:\n # This is actually a violation of the RFC.\n # 3.2.5.4 Waiting Completed states a client SHOULD\n # buffer all responses until timer has timed out.\n self._server_responses.append(server_response)\n self._reader_callback()\n break\n\n except Exception as e:\n print('An error occurred in Client._recvdata():\\n' + str(e))", "def receiveData(data):\n self.clientData.append('Received Data')\n a = {}\n if self.legacy == True:\n a['data'] = {'data': data, 'queryID': guidGenerator()}\n else:\n a['data'] = data\n a['messageType'] = 'Data'\n a['widget'] = 'NLP'\n self.data.append(a)\n if 'data' in a['data']:\n for i in a['data']['data'].keys():\n if 'name' in a['data']['data'][i]:\n self.uname_to_rid[a['data']['data'][i]['name']] = i\n print(printHeader('FFBOLab Client NLP') + \"Received data.\")\n self.tryComms(a)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of names of accessible repositories (including owner)
def list_repositories(self):
    data = self._get_all_data('/user/repos')
    return [repo['full_name'] for repo in data]
[ "def repositories():\n return user.repos()", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def list_repositories(self):\n return list(self.repositories.values())", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def list_repos(self):\n user = Github(self.token).get_user()\n repos= {user.login: user.get_repos()}\n for org in user.get_orgs():\n repos[org.login] = org.get_repos()\n return repos", "def list_ecr_repositories():\n repositories = ECS_MANAGER.list_ecr_repositories()\n\n if repositories:\n print(str_sep)\n print(\"Listing repositories available in {}\".format(SESSION.region_name.upper()))\n print(\"{:30}{:60}\".format('NAME', 'URI'))\n print(str_sep)\n\n for rep in repositories['repositories']:\n print(\"{:30}{:60}\".format(rep['repositoryName'], rep['repositoryUri']))", "def getContributorList(ownerLoginName, repoName):\n try:\n config = Config('app.properties')\n url = config.getProperty('contributors', 'url').format(ownerLoginName, repoName)\n username = config.getProperty('DEFAULT', 'username')\n password = config.getProperty('DEFAULT', 'password')\n response = requests.get(url, auth=(username, password))\n return _getContributorList(response)\n except requests.exceptions.RequestException:\n return []", "def get_known_repos() -> List[str]:\n return [db.name for db in PacmanConfig(conf=\"/etc/pacman.conf\").initialize_alpm().get_syncdbs()]", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def get_repositories(request):\n return [\n {\n 'id': repo.id,\n 'code': repo.code,\n 'name': repo.name,\n 'linux_path': repo.linux_path,\n 'osx_path': repo.osx_path,\n 'windows_path': repo.windows_path\n }\n for repo in Repository.query.all()\n ]", "def getDepositories(self):\n return self._priv_depositories", "def get_registries():\n url = \"/\".join([REGISTRY_BASE, \"_catalog\"])\n response = req(url)\n if response is not None:\n return response[\"repositories\"]\n return []", "def get_repos() -> list:\n def fix_name(name: str) -> str: return name.replace(' ', '-')\n if args.user:\n users = list(map(fix_name, args.user))\n data = 
get_repos_to_watch_from(users)\n else:\n try:\n file = args.json or next(f for f in os.listdir('.')\n if f.endswith('.json'))\n with open(file) as f:\n data = json.load(f)\n except (StopIteration, FileNotFoundError):\n parser.error('No json file found.')\n except json.JSONDecodeError:\n parser.error('Failed to decode the json file.')\n if not (isinstance(data, dict) and\n all(isinstance(user, str) and isinstance(repos, list) and\n all(isinstance(repo, str) for repo in repos)\n for user, repos in data.items())):\n parser.error(\"The given file does not have the appropriate \"\n \"structure: {user1: [repo1, ...], ...}.\")\n users = list(map(fix_name, data))\n data = {(fix_name(user), fix_name(repo))\n for user, repos in data.items() for repo in repos}\n # Thanks to github api.\n # We will only parse wanted repositories with issues/pulls.\n data &= get_repos_to_watch_from(users)\n return list(data)", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get dict of labels with colors for given repository slug
def list_labels(self, repository):
    data = self._get_all_data('/repos/{}/labels'.format(repository))
    return {l['name']: str(l['color']) for l in data}
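The _get_all_data helper used above is not shown in this row; a minimal sketch of how such a paginating helper could look, assuming the same self.session and GH_API_ENDPOINT attributes that appear in the create_label/update_label rows further down, and following GitHub's standard Link-header pagination:

def _get_all_data(self, resource):
    # Collect every page of results by following the 'next' Link header.
    data = []
    url = '{}{}?per_page=100'.format(self.GH_API_ENDPOINT, resource)
    while url:
        response = self.session.get(url)
        if response.status_code != 200:
            raise GitHubError(response)
        data.extend(response.json())
        url = response.links.get('next', {}).get('url')
    return data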
[ "def get_colors() -> Dict[str, cq.Color]:\n colors = {}\n for name in dir(Quantity):\n splitted = name.rsplit(SEP, 1)\n if splitted[0] == OCP_COLOR_LEADER:\n colors.update({splitted[1].lower(): cq.Color(splitted[1])})\n\n return colors", "def get_label_color(status):\n\n colors = {'NEW':'grey',\n 'ASSIGNED':'blue',\n 'OPEN': 'orange',\n 'FIXED': 'purple',\n 'RETEST':'cyan',\n 'REOPENED':'orange',\n 'VERIFIED': 'green',\n 'BLOCKED': 'red',\n 'CLOSED':'black',\n }\n\n return colors[status]", "def create_label_colormap():\n colormap = np.zeros((256, 3), dtype=np.uint8)\n for i, color in enumerate(COCO_CATEGORIES):\n colormap[i] = color['color']\n return colormap", "def colors(self):\n return (l.get_color() for l in self.labels)", "def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def labels(cls, collection):\n map = {}\n for item in collection:\n #map[item] = label(item, collection)\n map[item] = r\"$\\rm{%s}$\" % item.name\n return map", "def list_labels(service, repo):\n app = App()\n if repo:\n serv = app.get_service(service, repo=repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n print(tabulate([\n (\n label.name,\n label.color,\n label.description\n )\n for label in repo_labels\n ], tablefmt=\"fancy_grid\"))", "def assign_colour_label_data(catl):\n\n logmstar_arr = catl.logmstar.values\n u_r_arr = catl.modelu_rcorr.values\n\n colour_label_arr = np.empty(len(catl), dtype='str')\n for idx, value in enumerate(logmstar_arr):\n\n # Divisions taken from Moffett et al. 
2015 equation 1\n if value <= 9.1:\n if u_r_arr[idx] > 1.457:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value > 9.1 and value < 10.1:\n divider = 0.24 * value - 0.7\n if u_r_arr[idx] > divider:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value >= 10.1:\n if u_r_arr[idx] > 1.7:\n colour_label = 'R'\n else:\n colour_label = 'B'\n \n colour_label_arr[idx] = colour_label\n \n catl['colour_label'] = colour_label_arr\n\n return catl", "def milestone_labels(argv=None):\n argv = argv or sys.argv[1:]\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('color', help='color to make the labels')\n\n args = parser.parse_args(argv)\n\n session = GithubSession()\n\n labels = session.get_labels()\n\n labels_by_name = dict([(label['name'], label) for label in labels])\n\n for milestone in session.get_milestones():\n label_name = f'epic:{milestone[\"title\"]}'\n\n if label_name in labels_by_name:\n continue\n\n labels_by_name[label_name] = session.create_label(label_name, args.color)\n\n return labels_by_name", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def _extract_labels(time_series):\n labels = {\"resource_type\": time_series.resource.type}\n labels.update(time_series.resource.labels)\n labels.update(time_series.metric.labels)\n return labels", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def get_color(category_name: str):\r\n a = ['noise', 'animal', 'human.pedestrian.adult', 'human.pedestrian.child', 'human.pedestrian.construction_worker',\r\n 'human.pedestrian.personal_mobility', 'human.pedestrian.police_officer', 'human.pedestrian.stroller',\r\n 'human.pedestrian.wheelchair', 'movable_object.barrier', 'movable_object.debris',\r\n 'movable_object.pushable_pullable', 'movable_object.trafficcone', 'static_object.bicycle_rack', 'vehicle.bicycle',\r\n 'vehicle.bus.bendy', 'vehicle.bus.rigid', 'vehicle.car', 'vehicle.construction', 'vehicle.emergency.ambulance',\r\n 'vehicle.emergency.police', 'vehicle.motorcycle', 'vehicle.trailer', 'vehicle.truck', 'flat.driveable_surface',\r\n 'flat.other', 'flat.sidewalk', 'flat.terrain', 'static.manmade', 'static.other', 'static.vegetation',\r\n 'vehicle.ego']\r\n class_names = [\r\n 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',\r\n 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'\r\n ]\r\n #print(category_name)\r\n if category_name == 'bicycle':\r\n return nusc.colormap['vehicle.bicycle']\r\n elif category_name == 'construction_vehicle':\r\n return nusc.colormap['vehicle.construction']\r\n elif category_name == 'traffic_cone':\r\n return nusc.colormap['movable_object.trafficcone']\r\n\r\n for key in nusc.colormap.keys():\r\n if category_name in key:\r\n return nusc.colormap[key]\r\n return [0, 0, 0]", "def Labels(self) -> Dict[str, str]:\n labels = self.Read().spec.selector # type: Dict[str, str]\n return labels", "def prepare_color_dict():\n\n\tfn_colors = {}\t\t# Dictionnaire pour associer les genes a une couleur refletant la categorie fonctionnelle; voir fn prepare_color_dict\n\n\n\t#\t\t\t 0\t\t\t\t\t 4\t\t\t\t\t\t 9\t\t\t\t\t\t 14\t\t\t\t\t 19\t\t\t\t 24 25\n\talphabet = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", 
\"Y\", \"Z\"]\n\n\n\tpil = alphabet[8:22] + [\"VA\"]\t# les pil genes connus vont de pilI a pilV et on a aussi un gene \"pilVA\" \n\t\n\t# lightblue genes\n\n\tfor p in pil:\n\t\tfn_colors[\"pil\" + p] = colors.lightblue\n\n\n\t# pink genes\n\n\ttra = alphabet[:-1]\t\t\t\t# les genes tra vont de traA a traX\n\n\tfor i in tra:\n\t\tfn_colors[\"tra\" + i] = colors.pink\n\n\ttrb = alphabet[:3]\t\t\t\t# traA, traB et traC\n\n\n\tfor i in trb:\n\t\tfn_colors[\"trb\" + i] = colors.pink\n\n\n\totherPinkGenes = [\"sogL\", \"sogS\", \"yggA\", \"nikA\", \"nikB\"]\n\n\tfor i in otherPinkGenes:\t\n\t\tfn_colors[i] = colors.pink\n\n\n\t# orange genes\n\n\tfn_colors[\"rci\"] = colors.orange\t# un seul gene dans cette categorie\n\n\n\t# blue genes\n\n\tblueGenes = [\"repZ\", \"repC\", \"ISEcp1\", \"Tn1721\", \"IS1S\", \"tnpA\", \"intI1\", \"IS5075\", \"IS5\", \"Tn2\", \"hin\", \"TnAs1\", \"ISSbo1\"]\n\n\n\tfor i in blueGenes:\n\t\n\t\tfn_colors[i] = colors.blue\n\n\n\t# green genes\n\n\tgreenGenes = [\"ibfA\", \"ydeA\", \"ydfA\", \"ydfB\", \n\t\t\t\t \"mck\", \"kor\",\n\t\t\t\t \"ydiA\", \"ydjA\", \"pifA\",\n\t\t\t\t \"yefA\",\"yegA\", \"parA\", \"parB\", \"impB\", \"impA\", \"impC\",\n\t\t\t\t \"yfaA\",\"yfaB\", \"yfbA\", \"yfbB\", \"yfcA\", \"yfcB\", \"yfeA\",\"yfeB\",\"yfeC\",\n\t\t\t\t \"yfhA\", \"psiB\", \"psiA\", \"ardA\"]\n\n\n\tfor i in greenGenes:\n\t\t\n\t\tfn_colors[i] = colors.green\n\n\treturn fn_colors", "def _get_node_colors(label_groups, orig_labels, cortex_colors=None):\n labels_ordered, group_numbers, group_names = _get_node_grouping(label_groups, orig_labels)\n\n if cortex_colors is None:\n\n from matplotlib.pyplot import cm\n cortex_list = []\n for group in label_groups:\n cortex_list.append(list(group.keys())[0])\n n_colors = len(cortex_list)\n\n cortex_colors = cm.gist_rainbow(np.linspace(0, 1, n_colors))\n cortex_colors_ = []\n for gn in group_names:\n idx = cortex_list.index(gn)\n cortex_colors_.append(cortex_colors[idx])\n cortex_colors = cortex_colors_\n\n label_colors = []\n for ind, rep in enumerate(group_numbers):\n label_colors += [cortex_colors[ind]] * rep\n\n assert len(label_colors) == len(labels_ordered), 'Number of colours do not match'\n\n # the order of the node_colors must match that of orig_labels\n # therefore below reordering is necessary\n\n node_colors = [label_colors[labels_ordered.index(orig)] for orig in orig_labels]\n\n return node_colors", "def getLabels(kmeans, options):\r\n \r\n#########################################################\r\n## YOU MUST REMOVE THE REST OF THE CODE OF THIS FUNCTION\r\n## AND CHANGE FOR YOUR OWN CODE\r\n#########################################################\r\n## remind to create composed labels if the probability of \r\n## the best color label is less than options['single_thr']\r\n \r\n meaningful_colors = []\r\n unique = []\r\n j = 0\r\n \r\n for i in kmeans.centroids:\r\n if np.amax(i) < options['single_thr']:\r\n tmp = i.flatten()\r\n tmp.sort()\r\n main = cn.colors[np.where(i == tmp[-1])[0][0]]\r\n secondary = cn.colors[np.where(i == tmp[-2])[0][0]]\r\n \r\n if main < secondary:\r\n color = main + secondary\r\n else:\r\n color = secondary + main\r\n \r\n if color not in meaningful_colors:\r\n meaningful_colors.append(color)\r\n unique.append([j])\r\n else:\r\n unique[meaningful_colors.index(color)].append(j)\r\n \r\n else:\r\n if cn.colors[np.argmax(i)] in meaningful_colors:\r\n unique[meaningful_colors.index(cn.colors[np.argmax(i)])].append(j)\r\n else:\r\n meaningful_colors.append(cn.colors[np.argmax(i)])\r\n 
unique.append([j])\r\n \r\n j += 1\r\n \r\n return meaningful_colors, unique", "def nhsd_colours():\n\n nhsd_chart_colours = [\"#005EB8\", \"#71CCEF\", \"#84919C\", \"#003087\", \"#D0D5D6\"]\n nhsd_chart_background = {\"chart_grey_3\": \"#F8F8F8\", \"white\": \"#FFFFFF\"}\n nhsd_core_colours = {\n \"white\": \"#ffffff\",\n \"white_tints\": [\"#f9fafb\", \"#f3f5f6\", \"#edeff1\", \"#def2e5\"],\n \"nhs_blue\": \"#005eb8\",\n \"blue_tints\": [\"#337EC6\", \"#ACCAE8\", \"#D4E4F3\", \"#E6EFF8\"],\n \"nhs_dark_grey\": \"#425563\",\n \"grey_tints\": [\n \"#687784\",\n \"#98A4AD\",\n \"#B3BBC1\",\n \"#DFE2E5\",\n \"#EDEFF1\",\n \"#F3F5F6\",\n \"#F9FAFB\",\n ],\n \"nhs_mild_grey\": \"#768692\",\n \"nhs_warm_yellow\": \"#FFB81C\",\n \"warm_yellow_tints\": [\"#FFE8B4\", \"#FFF1CC\", \"#FFF8E8\"],\n }\n nhsd_font = [\"Frutiger Light\", \"Frutiger Roman\"]\n nhsd_font_backup = [\"Arial\"]\n colour_dict = {\n \"chart\": nhsd_chart_colours,\n \"chart_background\": nhsd_chart_background,\n \"core\": nhsd_core_colours,\n \"font\": nhsd_font,\n \"font_backup\": nhsd_font_backup,\n }\n return colour_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create new label in given repository
def create_label(self, repository, name, color, **kwargs):
    data = {'name': name, 'color': color}
    response = self.session.post(
        '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),
        json=data
    )
    if response.status_code != 201:
        raise GitHubError(response)
[ "def test_issue_create_label(self):\n pass", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def test_issue_add_label(self):\n pass", "def add_repository_labels(self, eitem):\n\n eitem[REPO_LABELS] = self.repo_labels", "def create(self, label_name):\n try:\n label = self.service.users().labels().create(userId=self.user_id,\n body=self.make_label(label_name)).execute()\n print\n label['id']\n return label\n except errors.HttpError as error:\n print('An error occurred: {}'.format(error))", "def create_trello_label(label_name, label_color, board_id):\n\turl = \"https://api.trello.com/1/labels\"\n\n\tquery = {\n\t\t'key': TRELLO_API_KEY,\n\t\t'token': TRELLO_TOKEN,\n\t\t'name': label_name,\n\t\t'color': label_color,\n\t\t'idBoard': board_id\n\t}\n\n\tresponse = requests.request(\n\t\t\"POST\",\n\t\turl,\n\t\tparams=query\n\t)\n\n\tprint(response.text)", "def create_label(service, user_id, label_object):\n try:\n label = service.users().labels().create(userId=user_id,\n body=label_object).execute()\n print(\n 'created label name: {}, label id: {}'.format(label_object[\"name\"],\n label['id']))\n return label\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def apply(\n repo: str,\n filename: str = typer.Option(\"labels.yaml\", \"--filename\", \"-f\"),\n github_token: str = typer.Option(None, envvar=\"GITHUB_TOKEN\"),\n):\n try:\n with open(filename) as f:\n labels = yaml.safe_load(f)\n except FileNotFoundError:\n typer.echo(f\"File {filename} not found!\")\n raise typer.Exit(1)\n\n gh_repo = get_repo(repo, github_token)\n\n print(f\"Applying labels to {gh_repo.html_url}/labels\")\n\n for label in labels:\n if label[\"description\"] is None:\n label[\"description\"] = \"\"\n try:\n existing_label = gh_repo.get_label(label[\"name\"])\n existing_label.edit(name=label[\"name\"], color=label[\"color\"], description=label[\"description\"])\n print(f\"Edited label {label}\")\n except github.GithubException as e:\n if e.status == 404:\n gh_repo.create_label(name=label[\"name\"], color=label[\"color\"], description=label[\"description\"])\n print(f\"Created label {label}\")\n else:\n raise", "def create_label(program, ref: str):\n program.set_label_index(base3_to_int(ref), program.current_index)", "def create(self):\n return dydra.Operation(self.client.call('repository.create', self.name), client=self.client)", "def create_issue(OWNER, token, repository, title, description, label):\n g = Github(token)\n repo_name = repository.split(\"/\")[4]\n repo = g.get_repo(\"{owner}/{repo_name}\".format(owner=OWNER, repo_name=repo_name))\n return repo.create_issue(title=title, body=description, labels=[label])", "def add_labels(number, 
labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--add-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def create_issue(repo, issue, label, key):\n global debug\n print(\"..\",issue['title'])\n if repo and not debug:\n # print(\" .. Created ({title})\", title=issue['title'])\n body = issue_body(\n issue['body'], repo.default_branch, issue['location'], issue['context']\n )\n res = repo.create_issue(\n title=issue['title'],\n body=body,\n labels=[label]\n )\n issue['number'] = res.number\n issue['created_at'] = res.created_at\n else:\n print(\" .. Skipped because no repository identified.\")\n\n tweak_todo(key, issue['title'], issue['location'])\n\n issue.pop('context', 'No Key found')\n return issue", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def label_new(request):\n if request.method == 'POST':\n form = NewLabelForm(request.POST)\n\n if form.is_valid():\n label = form.save()\n messages.success(request, 'Label successfully created.')\n return HttpResponseRedirect(reverse('label_main', args=[label.id]))\n else:\n messages.error(request, 'Please correct the errors below.')\n else:\n form = NewLabelForm()\n\n return render_to_response('annotations/label_new.html', {\n 'form': form,\n },\n context_instance=RequestContext(request)\n )", "def test_create_repository(self):\n pass", "def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session(), **kwargs) -> List[dict]:\n url = get_create_label_url()\n templates_ids = [template.id for template in templates]\n\n description = kwargs.get('description', None)\n has_multiple_top_candidates = kwargs.get('has_multiple_top_candidates', False)\n data_type = kwargs.get('data_type', 'Text')\n\n data = {\"project\": project_id,\n \"text\": label_name,\n \"description\": description,\n \"has_multiple_top_candidates\": has_multiple_top_candidates,\n \"get_data_type_display\": data_type,\n \"templates\": templates_ids\n }\n\n r = session.post(url=url, json=data)\n\n assert r.status_code == requests.codes.created, f'Status of request: {r}'\n label_id = r.json()['id']\n return label_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update existing label in given repository
def update_label(self, repository, name, color, old_name=None, **kwargs):
    data = {'name': name, 'color': color}
    response = self.session.patch(
        '{}/repos/{}/labels/{}'.format(
            self.GH_API_ENDPOINT, repository, old_name or name
        ),
        json=data
    )
    if response.status_code != 200:
        raise GitHubError(response)
[ "def update(self):\n args = {attr: getattr(self, attr) for attr in self.to_update}\n args[\"id\"] = self.id\n _perform_command(self.owner, \"label_update\", args)", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def UpdateLabel(self) -> _n_6_t_0:", "def updatelabel(task, label, eid):\n ServerManager.get()\n result = ServerManager.api.update_property(task, eid, prop='label', value=label)\n if result.response_type == 'success':\n click.echo(click.style(result.message, fg='green'))\n else:\n click.echo(click.style(result.message, fg='red'))", "def add_repository_labels(self, eitem):\n\n eitem[REPO_LABELS] = self.repo_labels", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def apply(\n repo: str,\n filename: str = typer.Option(\"labels.yaml\", \"--filename\", \"-f\"),\n github_token: str = typer.Option(None, envvar=\"GITHUB_TOKEN\"),\n):\n try:\n with open(filename) as f:\n labels = yaml.safe_load(f)\n except FileNotFoundError:\n typer.echo(f\"File {filename} not found!\")\n raise typer.Exit(1)\n\n gh_repo = get_repo(repo, github_token)\n\n print(f\"Applying labels to {gh_repo.html_url}/labels\")\n\n for label in labels:\n if label[\"description\"] is None:\n label[\"description\"] = \"\"\n try:\n existing_label = gh_repo.get_label(label[\"name\"])\n existing_label.edit(name=label[\"name\"], color=label[\"color\"], description=label[\"description\"])\n print(f\"Edited label {label}\")\n except github.GithubException as e:\n if e.status == 404:\n gh_repo.create_label(name=label[\"name\"], color=label[\"color\"], description=label[\"description\"])\n print(f\"Created label {label}\")\n else:\n raise", "def change_issues_label(self, msg, old_label, new_label):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} label:{} is:open type:issue\".format(\n task_repository_name(), old_label)\n issue_list = client.search_issue(cmd, 10)\n for issue in issue_list:\n trans.wait_for_limit(MAX_RESULT, MAX_RESULT)\n issue.remove_from_labels(old_label)\n issue.add_to_labels(new_label)\n yield \"{} issues has been changed label from {} to {}\".format(len(issue_list), old_label, new_label)", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def assign_label(self, dbcurs, existing_labs):\n if not self.is_usable() or self.not_identified():\n raise FindResultErr(\"Object not 
usable\")\n n = 0\n base = ord('A')\n while 1:\n nlab = chr(base + n % 26)\n if n >= 26:\n nlab += str(n // 26)\n if nlab not in existing_labs:\n break\n n += 1\n self.obj.label = self.label = nlab\n existing_labs.add(nlab)\n return dbcurs.execute(\"UPDATE objdata SET label=%s WHERE ind={:d}\".format(self.obj.objind), nlab)", "def update_from_repo():\n\treturn", "def update_ocm_label(\n ocm_api: OCMBaseClient,\n ocm_label: OCMLabel,\n value: str,\n) -> None:\n ocm_api.patch(\n api_path=ocm_label.href,\n data={\"kind\": \"Label\", \"key\": ocm_label.key, \"value\": value},\n )", "def update(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def repository_update():\n db = flask.current_app.container.get('db')\n gh_api = flask.current_app.container.get(\n 'gh_api', token=flask.session['github_token']\n )\n\n full_name = flask.request.form.get('full_name')\n repo = get_repo_if_admin(db, full_name)\n if repo is None:\n flask.abort(404)\n\n gh_repo = gh_api.get('/repos/' + full_name)\n if gh_repo.is_ok:\n repo.update_from_dict(gh_repo.data)\n gh_repo_langs = gh_api.get('/repos/' + full_name + '/languages')\n if gh_repo_langs.is_ok:\n repo.update_languages(gh_repo_langs.data)\n db.session.commit()\n else:\n flask.flash('GitHub doesn\\'t know about this repository. '\n 'Try it later or remove repository from app.',\n 'error')\n\n return flask.redirect(\n flask.url_for('manage.repository_detail', full_name=repo.full_name)\n )", "def _mutate_label(label: str) -> str:\n return label", "def _update_label(self, outer_pos, inner_pos, new_label):\n r, c = outer_pos\n ir, ic = inner_pos\n self.inner_boards[r][c][ir][ic][\"text\"] = new_label", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def label_experiment(self, exp_id):\n exp = experiment.experiment(new_experiment=False, ts=str(exp_id))\n label = request.form.get('label')\n exp.update_metadata(change_label=True, label=label)\n\n return \"OK\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete existing label in given repository
def delete_label(self, repository, name, **kwargs):
    response = self.session.delete(
        '{}/repos/{}/labels/{}'.format(
            self.GH_API_ENDPOINT, repository, name
        )
    )
    if response.status_code != 204:
        raise GitHubError(response)
[ "def delete_label(self, label):\n return self.label(label, action='DELETE')", "def test_issue_delete_label(self):\n pass", "def delete(self):\n args = {\"id\": self.id}\n _perform_command(self.owner, \"label_delete\", args)", "def repository_delete(ctx: click.Context, repository_name):\n subcommand_repository.cmd_delete(ctx.obj, repository_name)", "def step_impl_a_named_label_is_to_be_removed(context, label):\n context.bdd_helper.message_data = {\"action\": \"remove\", \"label\": label}", "def del_label(self, label):\n if label is None:\n raise ValueError(\"label is not specified\")\n self._fetch_with_no_cache()\n if label in self._msg.labels:\n self._msg.labels.remove(label)\n self._update(self._msg, method=\"PUT\")", "def delete(self, label):\n query = (\"DELETE FROM %s \" % (self.__tablename__) +\n \"WHERE label=?\",\n (label))\n self.sql_execute(query)", "def delete_label(id):\n dao.delete_label(id)\n return jsonify(dao.get_label(id))", "def delete_issue_label(repo, project, issue_number, delete_label_id):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = [label_id for label_id in current_label_ids\n if label_id != delete_label_id]\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def test_issue_remove_label(self):\n pass", "def delete_label_from_es():\n args = _get_args(delete_label_from_es.__doc__)\n label_file = args.label_file\n es_index_name = cluster_conf.ES_INDEX if args.index is None else args.index\n log.debug(\"Starting...\")\n update_db.delete_label_only(label_file=label_file, es_index=es_index_name)\n log.debug(\"Execution completed.\")\n log.debug(\"Uploading log...\")\n logger.Logger().upload_log(index=cluster_conf.LOG_INDEX,\n es_host_ip=cluster_conf.ES_HOST_IP,\n es_host_port=cluster_conf.ES_HOST_PORT)", "def test_delete_label_useryes(self, requests_mock, input_mock):\n\n url = endpoint[\"label\"].format(\n name=self.name,\n label=self.label,\n host=self.host,\n port=self.port,\n team=self.team,\n project=self.project,\n )\n requests_mock.delete(\n url,\n text=\"label deleted\",\n status_code=200,\n )\n expected = \"delete_label success: label deleted\"\n response = self.tfd_cursor.delete_label(self.label)\n self.assertEqual(\n response,\n expected,\n msg=\"Expected: '{}', got '{}'\".format(expected, response),\n )", "def remove_by_label(self, label: str) -> None:\n node = self.get_node(label)\n self.remove_by_node(node)", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delete_ocm_label(ocm_api: OCMBaseClient, ocm_label: OCMLabel) -> None:\n ocm_api.delete(api_path=ocm_label.href)", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def delete_manifest_label(label_uuid, tag_manifest):\n\n # Find the label itself.\n label = get_manifest_label(label_uuid, tag_manifest)\n if label is None:\n return None\n\n if not label.source_type.mutable:\n raise DataModelException(\"Cannot delete immutable label\")\n\n # Delete the mapping records and label.\n (TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())\n\n deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n deleted_count = 
ManifestLabel.delete().where(ManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n label.delete_instance(recursive=False)\n return label", "def delete(self, id_):\n\n # Get label.\n id_ = get_int_arg('id_', id_)\n label = g.db.query(Label).filter(Label.id == id_).first()\n\n if label is None:\n raise NotFound(\"Label '%s' does not exist.\" % id_)\n\n # Delete label\n g.db.delete(label)\n try:\n g.db.commit()\n except DBAPIError as e:\n raise BadRequest('Database error: {}'.format(e))\n\n message = 'Label {} deleted'.format(label.name)\n response = jsonify(message=message)\n response.status_code = 202\n\n return response", "def remove_label(label, message, user):\n actor = Modifier._get_label_actor(user=user, message=message)\n\n try:\n query = \"DELETE FROM securemessage.status WHERE label = '{0}' and msg_id = '{1}' and actor = '{2}'\".format(\n label, message[\"msg_id\"], actor\n )\n with db.engine.begin() as conn:\n conn.execute(text(query))\n return True\n except Exception as e:\n logger.error(\"Error removing label from database\", msg_id=message, label=label, user_uuid=actor, error=e)\n raise InternalServerError(description=\"Error removing label from database\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts feature vectors from a given model and dataset and writes them, along with labels, to a file. This function works for any model whose forward() method returns, on any given input x, the pair (prediction on x, feature vector for x) and more generally, any model whose second return value is a feature vector.
def extract_feature_vectors(model, data_loader, parameters, features_file_path):
    feature_vectors, label_vectors = [], []

    # Set model to evaluation mode
    model.eval()

    # Show progress bar while iterating over mini-batches
    with tqdm(total=len(data_loader)) as progress_bar:
        for i, (X_batch, Y_batch) in enumerate(data_loader):

            # Dimensions of the input Tensor
            batch_size, channels, height, width = X_batch.size()

            # If GPU available, enable CUDA on data
            if parameters.cuda:
                X_batch = X_batch.cuda()
                Y_batch = Y_batch.cuda()

            # Wrap the input tensor in a Torch Variable
            X_batch_variable = Variable(X_batch, volatile=True)

            # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features
            Y_predicted, features = model(X_batch_variable)

            # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to
            # CPU, and convert it to a NumPy array
            features_numpy = features.data.cpu().numpy()

            # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array
            Y_numpy = Y_batch.cpu().numpy()

            # For each example in the batch, record its features and labels
            for j in range(batch_size):
                feature_vectors.append(features_numpy[j,:])
                label_vectors.append(Y_numpy[j,:])

            progress_bar.update()

    utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)
[ "def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(feat_file_name, label_file_name), color.CYAN)\n\n #Save feature vector to disk\n with open(feat_file_name, 'w') as f:\n pickle.dump(feat_vec, f)\n #Save label file\n with open(label_file_name, 'w') as f:\n pickle.dump(labels, f)", "def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn", "def write_svm_features(clf, vectorizer, round=1, filename=\"features\"):\n\n f = open(\"%s-round%d.txt\" % (filename, round), \"w\")\n weight_feature_pairs = zip(clf.coef_.tolist()[0], vectorizer.feature_names_)\n weight_feature_pairs.sort(key=lambda x:abs(x[0]), reverse=True)\n for weight, word in weight_feature_pairs:\n f.write(\"%s\\t%g\\n\" % (word, weight))\n f.close()", "def _write_word2vec_model(model, file_path):\n model.save(file_path)", "def __saveToTxt(self, filename, vector):\n\n print(\"Saving file: \", filename)\n with open(filename, \"w\") as f:\n # labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))\n for row in vector:\n label_str = ' '.join(row.astype(str)[0:5])\n feature_str = ' '.join(row.astype(str)[5:])\n f.write(\"|labels {} |features {}\\n\".format(label_str, feature_str))", "def save_dataset(dataset, save_dir, feature_names):\n # Create directory if it doesn't exist\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n feature_files = []\n\n for f_name in feature_names:\n f_name = f_name + '.txt'\n feature_files.append(open(os.path.join(save_dir, f_name), 'w', encoding='utf-8'))\n\n for instance in dataset:\n for feature, ff in zip(instance, feature_files):\n ff.write(\"{}\\n\".format(\" \".join(feature)))\n\n for ff in feature_files:\n ff.close()", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def write_code_vectors(reader, model, data_loader, option, vector_file, mode, test_result_file):\n model.eval()\n with torch.no_grad():\n if test_result_file is not None:\n fr = open(test_result_file, \"w\")\n else:\n fr = None\n\n with open(vector_file, mode) as fv:\n for i_batch, sample_batched in enumerate(data_loader):\n id = sample_batched['id']\n starts = sample_batched['starts'].to(option.device)\n paths = sample_batched['paths'].to(option.device)\n ends = sample_batched['ends'].to(option.device)\n label = sample_batched['label'].to(option.device)\n\n preds, code_vector, _ = model.forward(starts, paths, ends, label)\n preds_prob, preds_label = torch.max(preds, dim=1)\n\n for i in range(len(starts)):\n label_name = reader.label_vocab.itos[label[i].item()]\n vec = code_vector.cpu()[i]\n fv.write(label_name + \"\\t\" + \" \".join([str(e.item()) for e in vec]) + \"\\n\")\n\n if test_result_file is not None:\n pred_name = reader.label_vocab.itos[preds_label[i].item()]\n fr.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n\".format(id[i].item(), label_name == pred_name, label_name, pred_name, preds_prob[i].item()))\n\n if test_result_file is not None:\n fr.close()", "def export_model(self, model):\n os.mkdir(model)\n classifier_path = os.path.join(model, 
'classifier')\n parameters_path = os.path.join(model, 'parameters.pkl')\n parameters = {'label_mapping': self.label_mapping, 'word_regex': self.word_regex, 'tokenize': self.tokenize,\n 'ignored_regexes': self.ignored_regexes, 'stem': self.stem,\n 'tfidf_vectorizer': self.tfidf_vectorizer}\n with open(classifier_path, 'wb') as f:\n dill.dump(self.clf, f)\n with open(parameters_path, 'wb') as f:\n dill.dump(parameters, f)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)", "def save_feature_file() :\n training_features, training_label = generate_features(\"./boats/\")\n np.save(\"training_data\", training_features)\n np.save(\"training_label\", training_label)", "def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)", "def save_coefficients(model, filename):\n with h5py.File(filename, \"w\") as hf:\n hf.create_dataset(\"coef\", data=model.coef_)\n hf.create_dataset(\"intercept\", data=model.intercept_)\n if is_classifier(model):\n hf.create_dataset(\"classes\", data=model.classes_)", "def export_model(self, clf, model_path=\"model.pkl\"):\n\n try:\n with open(model_path, 'wb') as model_file:\n pickle.dump(clf, model_file)\n except Exception as error:\n logging.error(\"Failed to export model to %s.\", model_path)\n raise error", "def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)", "def make_predictions_labels(dataset_name=\"Movie\", model_name=\"gicf\"):\r\n \r\n if dataset_name == \"Movie\":\r\n dataset = pickle.load(open(\"data/sentence_level_data/movie/movie_sentence_level_test_data.p\", \"rb\"))\r\n \r\n if model_name == \"gicf\":\r\n coefficient = pickle.load(open(\"results/trained_models/gicf/movie_beta_sentence.p\", \"rb\"))\r\n intercept = 0\r\n model = gensim.models.Doc2Vec.load(\"data/doc_2_vec/movie_model\")\r\n embeddings = [model.infer_vector(row[0].split(), alpha=0.1, steps=20) for row in dataset]\r\n embeddings = numpy.array(embeddings)\r\n \r\n elif model_name == \"w2v\":\r\n coefficient = numpy.transpose(pickle.load(open(\"results/trained_models/w2v/movie_w2v_coeffiecient.p\", \"rb\")))\r\n intercept = pickle.load(open(\"results/trained_models/w2v/movie_w2v_intercept.p\", \"rb\"))\r\n model = gensim.models.Word2Vec.load(\"results/trained_models/w2v/Movie_model_words\")\r\n embeddings = numpy.zeros((500,1), float)\r\n for row in dataset:\r\n word_vecs = 
numpy.zeros((500,1), float)\r\n for word in row[0].split():\r\n try:\r\n word_vec = numpy.expand_dims(model.wv[word], axis=1)\r\n except:\r\n continue\r\n word_vecs = numpy.append(word_vecs, word_vec, axis=1)\r\n text_vec = numpy.expand_dims(numpy.mean(word_vecs, axis=1), axis=1)\r\n embeddings = numpy.append(embeddings, text_vec, axis=1)\r\n embeddings = numpy.transpose(embeddings)\r\n embeddings = numpy.delete(embeddings, 0, 0)\r\n \r\n elif model_name == \"bow\":\r\n coefficient = numpy.transpose(pickle.load(open(\"results/trained_models/bow/movie_bow_coeffiecient.p\", \"rb\")))\r\n intercept = pickle.load(open(\"results/trained_models/bow/movie_bow_intercept.p\", \"rb\"))\r\n model = pickle.load(open(\"results/trained_models/bow//Movie_bow.p\", \"rb\"))\r\n embeddings = []\r\n for row in dataset:\r\n vec = model.transform([\" \".join(row[0])]).toarray()\r\n vec_av = vec / len(row[0])\r\n embeddings.append(vec_av)\r\n embeddings = numpy.reshape(numpy.array(embeddings), newshape=(-1,5000))\r\n \r\n \r\n elif dataset_name == \"Financial\":\r\n dataset = pickle.load(open(\"data/sentence_level_data/financial/financial_sentence_level_val_data.p\", \"rb\"))\r\n \r\n if model_name == \"gicf\":\r\n coefficient = pickle.load(open(\"results/trained_models/gicf/financial_beta_sentence.p\", \"rb\"))\r\n intercept = 0\r\n model = gensim.models.Doc2Vec.load(\"data/doc_2_vec/financial_model\")\r\n embeddings = [model.infer_vector(row[0].split(), alpha=0.1, steps=20) for row in dataset]\r\n embeddings = numpy.array(embeddings)\r\n \r\n elif model_name == \"w2v\":\r\n coefficient = numpy.transpose(pickle.load(open(\"results/trained_models/w2v/financial_w2v_coeffiecient.p\", \"rb\")))\r\n intercept = pickle.load(open(\"results/trained_models/w2v/financial_w2v_intercept.p\", \"rb\"))\r\n model = gensim.models.Word2Vec.load(\"results/trained_models/w2v/Financial_model_words\")\r\n embeddings = numpy.zeros((500,1), float)\r\n for row in dataset:\r\n word_vecs = numpy.zeros((500,1), float)\r\n for word in row[0].split():\r\n try:\r\n word_vec = numpy.expand_dims(model.wv[word], axis=1)\r\n except:\r\n continue\r\n word_vecs = numpy.append(word_vecs, word_vec, axis=1)\r\n text_vec = numpy.expand_dims(numpy.mean(word_vecs, axis=1), axis=1)\r\n embeddings = numpy.append(embeddings, text_vec, axis=1)\r\n embeddings = numpy.transpose(embeddings)\r\n embeddings = numpy.delete(embeddings, 0, 0)\r\n \r\n elif model_name == \"bow\":\r\n coefficient = numpy.transpose(pickle.load(open(\"results/trained_models/bow/financial_bow_coeffiecient.p\", \"rb\")))\r\n intercept = pickle.load(open(\"results/trained_models/bow/financial_bow_intercept.p\", \"rb\"))\r\n model = pickle.load(open(\"results/trained_models/bow/Financial_bow.p\", \"rb\"))\r\n embeddings = []\r\n for row in dataset:\r\n vec = model.transform([\" \".join(row[0])]).toarray()\r\n vec_av = vec / len(row[0])\r\n embeddings.append(vec_av)\r\n embeddings = numpy.reshape(numpy.array(embeddings), newshape=(-1,5000))\r\n \r\n scores = sigmoid_function(x=embeddings, beta_0=intercept, beta_1=coefficient)\r\n predictions = numpy.array([1 if score > 0.5 else 0 for score in scores])\r\n labels = numpy.array([row[1] for row in dataset])\r\n\r\n return predictions, labels, scores", "def extract_features(filepath, model='VGG16', write_to=None):\r\n\r\n # print('Extracting features')\r\n\r\n # Get the model\r\n # print('Acquiring model \"{}\"'.format(model), end='')\r\n m = named_model(model)\r\n # print('\\rAcquired model\\t\\t\\t\\t\\t')\r\n\r\n # Get the image 
filepaths\r\n filepath = filepath.replace('\\\\', '/')\r\n img_fps = []\r\n\r\n assert os.path.exists(filepath), \\\r\n 'Filepath does not exist: \"{}\"'.format(filepath)\r\n\r\n if os.path.isfile(filepath):\r\n ext = filepath.lower().rsplit('.', 1)[-1]\r\n assert ext in IMG_EXTS, \\\r\n 'Specified file \"{}\" is not in recognised image formats'.format(filepath)\r\n img_fps = img_fps.append(filepath)\r\n\r\n elif os.path.isdir(filepath):\r\n for fn in os.listdir(filepath):\r\n ext = fn.rsplit('.', 1)[-1]\r\n if ext in IMG_EXTS:\r\n img_fps.append(os.path.join(filepath, fn))\r\n\r\n else:\r\n raise ValueError('Filepath should be an image, or a directory containing images')\r\n\r\n # And the image filenames\r\n img_fns = [fp.replace('\\\\', '/').rsplit('/', 1)[-1] for fp in img_fps]\r\n\r\n # print('Found {} images'.format(len(img_fns)))\r\n\r\n # Run the extraction over each image\r\n features = []\r\n for (i, fp) in enumerate(img_fps):\r\n # print('\\rProcessing: {:.2f}%\\t\\t'.format((i + 1) / len(img_fps) * 100), end='', flush=True)\r\n features.append(_extract(fp, m))\r\n\r\n # print('\\nSuccess')\r\n\r\n # Make into a DataFrame and add an ID column\r\n features_df = DF(features, dtype=object)\r\n id_col = DF(img_fns, dtype=str)\r\n features_df.insert(0, 'ID', id_col)\r\n features_df.to_csv(write_to, index=False)\r\n\r\n return features_df", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the average distance between pairs of vectors in a given list of vectors.
def average_distance_between_vectors(vectors, distance):
    vectors = numpy.array(vectors)
    vectors = vectors - numpy.mean(vectors, axis=0)
    vectors = normalize(vectors)
    vectors = list(vectors)

    average_distance = utils.RunningAverage()
    for vector_1, vector_2 in itertools.combinations(vectors, r=2):  # All pairs of vectors
        average_distance.update(distance(vector_1, vector_2))

    return average_distance()
[ "def v_avg(vecs):\n xs = []\n ys = []\n\n for v in vecs:\n xs.append(v[0])\n ys.append(v[1])\n\n xresult = 0\n for x in xs:\n xresult += x\n\n yresult = 0\n for y in ys:\n yresult += y\n\n return xresult / len(xs), yresult / len(ys)", "def compute_average(vec_list):\r\n return np.sum(vec_list, axis = 0)/len(vec_list)", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def getAveragePositionFromList( positionsList ):\n \n vectors = [ vector.makeMVector( values = [x, y, z] ) for x, y, z in positionsList ]\n \n vectorsSum = vector.makeMVector()\n \n for v in vectors:\n \n vectorsSum += v\n \n vectorsAverage = vectorsSum / len( positionsList )\n \n return [ vectorsAverage[0], vectorsAverage[1], vectorsAverage[2] ]", "def vector_mean(vectors):\n\tn = len(vectors)\n\treturn scalar_multiply(1/n, vector_sum(vectors))", "def dist(u, v):\n return math.sqrt(sum((x - y)**2 for x,y in zip(u,v)))", "def centrality(vects):\n\n n = len(vects)\n\n # For each vector, find the average similarity to all the other\n # vectors. Use reference equality to avoid comparing with self.\n return [(sum([cosine_sim(vect, vect1)\n for vect1 in vects\n if vect is not vect1])\n / n)\n for vect in vects]", "def column_average(list_of_lists):\n return list(map(lambda x: sum(x)/len(x), zip(*list_of_lists)))", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def avgDistSV(self, X, distFunc):\n self.separateInClasses()\n SV = self.class_sv\n dist = np.zeros((len(X), len(SV)))\n for i, x in enumerate(X): # each sample in X\n for j, c in enumerate(SV): # each class in SV\n for sv in c: # each support vector in class\n dist[i][j] += distFunc(x, sv)\n dist[i][j] /= len(c)\n\n return dist", "def calculate_mean_and_stdd(list_of_values):\n mean = 0\n stdd = 0\n set_size = len(list_of_values)\n for acc in list_of_values:\n mean += acc\n mean = mean/set_size\n for acc in list_of_values:\n stdd += np.power(acc - mean, 2)\n stdd = np.sqrt(stdd/set_size)\n\n return mean, stdd", "def euklid_distance_points(a, mean):\n \"\"\"for i in range(n):\n s += (a[i] - mean[i])**2\"\"\"\n #print(s)\n return math.sqrt(sum([(x-y)**2 for x,y in zip(a,mean)]))\n #return math.sqrt( (a-mean)**2)", "def get_average_of_elements(first_list, second_list):\n\n extract = []\n result = 0\n for element in first_list:\n if element not in second_list:\n extract.append(element)\n for element in extract:\n result += element\n n = len(extract)\n return result/n", "def pearson_distance(vector1, vector2) :\n sum1 = sum(vector1)\n sum2 = sum(vector2)\n\n sum1Sq = sum([pow(v,2) for v in vector1])\n sum2Sq = sum([pow(v,2) for v in vector2])\n\n pSum = sum([vector1[i] * vector2[i] for i in range(len(vector1))])\n\n num = pSum - (sum1*sum2/len(vector1))\n den = math.sqrt((sum1Sq - pow(sum1,2)/len(vector1)) * (sum2Sq - pow(sum2,2)/len(vector1)))\n\n if den == 0 : return 0.0\n return 1.0 - num/den", "def _avg_triples(self,triples):\n return map(lambda x:sum(x)*1.0/len(x),zip(*triples))", "def avg(vector):\n if len(vector) == 0:\n return 0\n return sum(vector) / len(vector)", "def euclideanDist(vectorA, vectorB):\n diff = vectorA - vectorB\n return np.sqrt(np.dot(diff,diff))", "def sum_pairwise(self, segments):\n\n sum_pair = 0.0\n segments = list(segments)\n for ind1 in range(len(segments)):\n for ind2 in range(ind1+1, len(segments)):\n sum_pair += 
hypot(segments[ind1].startx - segments[ind2].startx, segments[ind1].starty - segments[ind2].starty)\n return sum_pair", "def add(*vectors):\n return (sum([v[0] for v in vectors]), sum([v[1] for v in vectors]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }