Dataset schema: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict).
extension of the model file to save to; this should depend on what format the model is saved under
def _get_model_file_extension(self): pass
[ "def _get_model_filename(self) -> str:\n model_filename = f'{self.model_dir}/{self.description}.{self._get_model_file_extension()}'\n return model_filename", "def _save_model(self, out_file):\n pass", "def _get_model_save_path(self):\n return '{0}/{1}.ckpt'.format(self._get_save_dir(), self.model_name)", "def save_model(self, filename):\n model = self.H if self.tame else self.Q\n filename = filename + \".p\" if not filename.endswith(\".p\") else filename\n with open(MODELS_DIR.joinpath(filename), \"wb\") as f:\n pickle.dump(model, f)", "def _file_extension_default(self):\n return '.ml'", "def ext(self):\n return os.path.splitext(self)[1]", "def filename_format(self):\n raise NotImplementedError", "def file_extension(self) -> str:\n return self._file_extension", "def __saveModel(self, filename):\n if filename != None and filename != \"\":\n joblib.dump(self.model, filename)", "def _saveModel(self):\r\n\t\tdelimiter = self._delimiterBox.currentSelected()\r\n\t\theader = self._headerCheckBox.isChecked() # column labels\r\n\t\tif self._filename is None:\r\n\t\t\tfilename = self._filenameLineEdit.text()\r\n\t\telse:\r\n\t\t\tfilename = self._filename\r\n\t\text = os.path.splitext(filename)[1].lower()\r\n\t\tindex = False # row labels\r\n\t\tencodingIndex = self._encodingComboBox.currentIndex()\r\n\t\tencoding = self._encodingComboBox.itemText(encodingIndex)\r\n\t\tencoding = _calculateEncodingKey(encoding.lower())\r\n\t\ttry:\r\n\t\t\tdataFrame = self._model.dataFrame()\r\n\t\texcept AttributeError as err:\r\n\t\t\traise AttributeError('No data loaded to export.')\r\n\t\telse:\r\n\t\t\tprint(\"Identifying export type for {}\".format(filename))\r\n\t\t\ttry:\r\n\t\t\t\tif ext in ['.txt','.csv']:\r\n\t\t\t\t\tdataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)\r\n\t\t\t\telif ext == '.tsv':\r\n\t\t\t\t\tsep = '\\t'\r\n\t\t\t\t\tdataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)\r\n\t\t\t\telif ext in ['.xlsx','.xls']:\r\n\t\t\t\t\tdataFrame.to_excel(filename, encoding=encoding, header=header, index=index, sep=delimiter)\r\n\t\t\texcept IOError as err:\r\n\t\t\t\traise IOError('No filename given')\r\n\t\t\texcept UnicodeError as err:\r\n\t\t\t\traise UnicodeError('Could not encode all data. 
Choose a different encoding')\r\n\t\t\texcept Exception:\r\n\t\t\t\traise\r\n\t\t\tself.signalExportFilenames.emit(self._model._filePath, filename)", "def default_save_as_fname(input_fname):\n parts = input_fname.split('.')\n if len(parts) == 1:\n return parts[0] + \"_hrv\"\n\n return '.'.join(parts[:-1]) + '_hrv'", "def save_model(model, suffix=None):\n # Create model directory with current time\n modeldir = os.path.join(\"/content/drive/MyDrive/dog_breed_classification/models\",\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%s\"))\n model_path = modeldir + \"-\" + suffix + \".h5\" # save format of model\n print(f\"Saving model to: {model_path}...\")\n model.save(model_path)\n return model_path", "def _filename_from_ext(self, ext: str) -> str:\n return os.path.join(self._DIRECTORY, self._name + \".\" + ext)", "def save_model(self):\n np.savetxt(\"weighth.csv\", self.wh, delimiter=\",\")\n np.savetxt(\"weighto.csv\", self.wo, delimiter=\",\")", "def trained_model_filepath(self) -> str:\n return f'/usr/src/app/audit/science/{self.location}/models/{self.model_id}'", "def save_model(self, save_folder: str, save_file: str):\n\n pass", "def save(self, epoch, file_path=\"output/bert_trained.model\"):\n output_path = file_path + \".ep%d\" % epoch\n torch.save(self.model.cpu(), output_path)\n self.model.to(self.device)\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n return output_path", "def get_file_extension(blender_output_format):\n # just png for now\n return blender_output_format.lower()", "def make_model_pickle_path(self):\r\n return path.join(self.pickle_path, \"model.pickle\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save the model to out_file
def _save_model(self, out_file): pass
[ "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "def __saveModel(self, filename):\n if filename != None and filename != \"\":\n joblib.dump(self.model, filename)", "def save_model(self):\n f1 = open(self.name + '_' + 'words', 'w')\n f2 = open(self.name + '_' + 'word_lengths', 'w')\n f3 = open(self.name + '_' + 'stems', 'w')\n f4 = open(self.name + '_' + 'sentence_lengths', 'w')\n f5 = open(self.name + '_' + 'word_pair', 'w')\n f1.write(str(self.words))\n f2.write(str(self.word_lengths))\n f3.write(str(self.stems))\n f4.write(str(self.sentence_lengths))\n f5.write(str(self.word_pair))\n f1.close() \n f2.close() \n f3.close() \n f4.close()\n f5.close()", "def save_model(self):\n np.savetxt(\"weighth.csv\", self.wh, delimiter=\",\")\n np.savetxt(\"weighto.csv\", self.wo, delimiter=\",\")", "def save_model(self,fileName):\n \n joblib.dump(self.predictor, fileName + '.pkl') \n \n return", "def save_model(self):\n return joblib.dump(self.pipeline, \"trained_TaxiFareModel.joblib\")", "def save_model(self):\n\n\t\tmodel_file = open(self.model_path,'wb')\n\t\tpickle.dump(self.model, model_file)\n\t\tmodel_file.close()", "def save_model(self, export_fn):\n torch.save(self.ae.state_dict(), export_fn)", "def pickleModel(self):\n print 'Saving model to file...'\n logit = LogisticRegression(C=self.C, penalty='l1')\n logit.fit(self.X_mapped,self.y)\n \n with open('model','w') as myFile:\n pickle.dump({'logit':logit,'degree':self.degree,'useInverse':self.useInverse,'mean':self.mean,'stdev':self.stdev,'n':self.n,'m':self.m},myFile)", "def save_model(self, export_path):\n torch.save(self.net.c, export_path)\n # torch.save(self.net.state_dict(), export_path)", "def save_model(self, filepath='model.h5'):\n self.model.save(filepath)", "def __export_models(self):\n print('Exporting...')\n\n with open('models.py', 'w') as f:\n f.write(self.file_code)\n\n print('Finished!')", "def save_model(self, filename):\n model = self.H if self.tame else self.Q\n filename = filename + \".p\" if not filename.endswith(\".p\") else filename\n with open(MODELS_DIR.joinpath(filename), \"wb\") as f:\n pickle.dump(model, f)", "def save_model(self, save_folder: str, save_file: str):\n\n pass", "def savemodel(self,filedir):\n\t\t__btm__.savemodel(self._handle,filedir)", "def save(self, output, data):\r\n pass", "def export_model(self, output_model_path: Path) -> None:\n directory = output_model_path.parent\n directory.mkdir(parents=True, exist_ok=True)\n\n models_temp_dir = self.working_directory.joinpath(\"model_archive_temp\")\n model = G2PModel.empty(output_model_path.stem, root_directory=models_temp_dir)\n model.add_meta_file(self)\n model.add_fst_model(self.working_directory)\n model.add_sym_path(self.working_directory)\n if directory:\n os.makedirs(directory, exist_ok=True)\n model.dump(output_model_path)\n model.clean_up()\n # self.clean_up()\n logger.info(f\"Saved model to {output_model_path}\")", "def to_file(fpath, models):\n \n logger.info(\"Writing Model Set to file: %s\" % fpath)\n with open(fpath, 'w') as out_file:\n out = csv.writer(out_file, delimiter='\\t')\n out.writerow([i for i, mid in enumerate(models)])\n out.writerow([models[mid].id for mid in models])\n out.writerow([models[mid].to_dict() for mid in models])", "def export_model(model, filename):\n if model == 'user':\n query = User.select()\n else:\n query = Page.select()\n query_to_file(query, filename)\n return \"Done\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates an sklearn-like classification report dictionary
def _calculate_classification_report(self) -> dict: pass
[ "def classification_report(self,X,y):\n y_pred = self.predict(X)\n clfr = classification_report(y, y_pred)\n\treturn clfr", "def createNBClassifier(data):\n\n # for each feature, need to calculate probability of True/False\n\n # get the 2 classes\n classes = set([])\n for d in data:\n classes.add(d['class'])\n if len(classes) == 2:\n break\n\n # simple set labels\n true_label = classes.pop()\n false_label = classes.pop()\n\n # for each feature we need to calculate probabilities of true/false\n keys = filter( lambda x: x != 'class', data[0].keys())\n\n classifier = {}\n totalnos = len(data)\n\n # does a loop over all elements in list for every key\n # can be optimized to one loop, TODO\n\n for k in keys:\n probset = {}\n probset['true'] = {}\n probset['false'] = {}\n\n for d in data:\n if d['class'] == true_label:\n probset['true'][d[k]] = probset['true'].get(d[k], 0) + 1\n probset['false'][d[k]] = probset['false'].get(d[k], 0) + 0\n else:\n probset['false'][d[k]] = probset['false'].get(d[k], 0) + 1\n probset['true'][d[k]] = probset['true'].get(d[k], 0) + 0\n\n # arbitrary cutoff to decide when the number of keys are too many\n if len(probset['true'].keys() + probset['false'].keys()) > 0.3*len(data):\n # too many keys present\n # discrete probability does not make sense\n # we need to model a gaussian distribution\n #probset = {}\n probset['gaussian'] = True\n\n # obtain mean and standard deviation\n true_nos = []\n false_nos = []\n for d in data:\n if d['class'] == true_label:\n true_nos.append(float(d[k]))\n else:\n false_nos.append(float(d[k]))\n \n true_nos = np.array(true_nos)\n false_nos = np.array(false_nos)\n\n probset['true_mean'] = float(np.mean(true_nos))\n probset['true_std'] = float(np.std(true_nos))\n\n probset['false_mean'] = float(np.mean(false_nos))\n probset['false_std'] = float(np.std(false_nos))\n\n else: \n # use ordinary distribution\n probset['gaussian'] = False\n\n # convert to probabilities\n for p in probset['true'].keys():\n probset[p] = float(probset['true'][p])/totalnos\n for p in probset['false'].keys():\n probset[p] = float(probset['false'][p])/totalnos\n\n # add it master dict\n classifier[k] = probset\n\n\n # add true and false labels\n classifier['true'] = true_label\n classifier['false'] = false_label\n\n #print classifier\n return classifier", "def classification_report(self, X, y, preprocess=True):\n y_pred = self.predict(X, preprocess=preprocess)\n y_true = y\n report = classification_report(y_true, y_pred)\n print(\" Classification Report: \".center(100, \"=\") + \"\\n\")\n print(report)\n return report", "def create_dicts(self):\n print(\"There are \" + str(self.matrix.shape[1]) + \" features and \")\n print(str(self.matrix.shape[0]) + \" instances to consider\")\n possible_labels = list(set(self.labels))\n matricies = {}\n ig_dict = {}\n indexes_dict = {}\n sums = {}\n probabilities = {}\n total_sum = float(self.matrix.sum())\n ig_term1 = 0\n for label in possible_labels:\n row_slice = [True if val == label else False for val in self.labels]\n matricies[label] = self.matrix[row_slice, :]\n sums[label] = float(matricies[label].sum())\n probabilities[label] = max(sums[label] / total_sum, 0.00000000001)\n ig_term1 += probabilities[label] * log(probabilities[label])\n\n ig_term1 *= -1\n print(\"Calculating information gain for feature: \")\n print(\"\\r0\", end='')\n for col_index in range(len(self.vocab)):\n if col_index % 100 == 0:\n print(\"\\r\" + str(col_index), end=\"\")\n term = self.vocab[col_index]\n t_count = max(float(self.matrix[:, 
col_index].sum()), 0.00000000001)\n label_counts = {}\n ig_term2 = 0\n ig_term3 = 0\n p_t = float(t_count) / total_sum\n p_tbar = 1 - p_t\n for label in possible_labels:\n try:\n label_counts[label] = float(a_matrix[:, col_index].sum())\n except:\n label_counts[label] = 0.0\n p_c1_t = max(label_counts[label] / t_count, 0.00000000001)\n ig_term2 += p_c1_t * log(p_c1_t)\n p_c1_tbar = max((sums[label] - label_counts[label]) / (total_sum - t_count), 0.00000000001)\n ig_term3 += p_c1_tbar * log(p_c1_tbar)\n\n ig_term2 *= p_t\n ig_term3 *= p_tbar\n ig = ig_term1 + ig_term2 + ig_term3\n # print ig\n ig_dict[term] = ig\n indexes_dict[term] = col_index\n\n self.ig_dict = ig_dict\n self.indexes_dict = indexes_dict", "def parse_classification_report(clfreport):\n lines = clfreport.split('\\n')\n # Remove empty lines\n lines = list(filter(lambda l: not len(l.strip()) == 0, lines))\n\n # Starts with a header, then score for each class and finally an average\n header = lines[0]\n cls_lines = lines[1:-1]\n avg_line = lines[-1]\n\n print(header)\n print(cls_lines)\n print(avg_line)\n assert header.split() == ['precision', 'recall', 'f1-score', 'support']\n assert avg_line.split()[1] == 'avg'\n\n # We cannot simply use split because class names can have spaces. So instead\n # figure the width of the class field by looking at the indentation of the\n # precision header\n cls_field_width = len(header) - len(header.lstrip())\n\n # Now, collect all the class names and score in a dict\n\n def parse_line(l):\n \"\"\"Parse a line of classification_report\"\"\"\n cls_name = l[:cls_field_width].strip()\n precision, recall, fscore, support = l[cls_field_width:].split()\n precision = float(precision)\n recall = float(recall)\n fscore = float(fscore)\n support = int(support)\n return (cls_name, precision, recall, fscore, support)\n\n data = collections.OrderedDict()\n for l in cls_lines:\n ret = parse_line(l)\n cls_name = ret[0]\n scores = ret[1:]\n data[cls_name] = scores\n\n # average\n data['avg'] = parse_line(avg_line)[1:]\n\n return data", "def classification_report_csv(report, output_file):\n report_data = []\n lines = report.split('\\n')\n for line in lines[2:-3]:\n row = {}\n row_data = line.split(' ')\n row['class'] = row_data[0]\n row['precision'] = float(row_data[1])\n row['recall'] = float(row_data[2])\n row['f1_score'] = float(row_data[3])\n row['support'] = float(row_data[4])\n report_data.append(row)\n dataframe = pd.DataFrame.from_dict(report_data)\n dataframe.to_csv(output_file + '.csv', index = False)", "def classification_report(self):\n message = \"\"\n total = [0, 0, 0, 0, 0, 0]\n for index, name in enumerate(self.class_names):\n # calculate metrics\n acc, p, r, f1, tpr, fpr = super()._cal_metrics(self.confusion_matrix[index][0],\n self.confusion_matrix[index][1],\n self.confusion_matrix[index][2],\n self.confusion_matrix[index][3])\n number = self.confusion_matrix[index][0] + self.confusion_matrix[index][1]\n metrics = [acc, p, r, f1, fpr, number]\n for i, metric in enumerate(metrics):\n if math.isnan(metric):\n metric = 0\n total[i] += metric\n title, message, end = super().report_template(message, name, self.max_length, metrics)\n title, message, end = super().report_template(message, \"TOTAL\", self.max_length, total)\n return title + message + end", "def classification_report_df(report):\n report_data = []\n lines = report.split('\\n')\n for line in lines[2:-6]:\n row = {}\n row_data = line.split(' ')\n row_data = list(filter(None, row_data))\n row['class'] = row_data[0]\n row['precision'] = 
float(row_data[1])\n row['recall'] = float(row_data[2])\n row['f1_score'] = float(row_data[3])\n row['support'] = float(row_data[4])\n report_data.append(row)\n df_report = pd.DataFrame.from_dict(report_data)\n \n return df_report", "def report(model_name, y_test, y_pred, le=None):\n \n # Estimation: Confusion Matrix & classification-report \n _confusion_matrix = confusion_matrix(y_test, y_pred)\n _classification_report = classification_report(y_test, y_pred, target_names=le.classes_, output_dict=False)\n _classification_report_dict = classification_report(y_test, y_pred, target_names=le.classes_, output_dict=True)\n\n # For Multiclass AUC\n _auc_dict = roc_auc_score_multiclass(y_test, y_pred)\n _auc_dict = dict((le.classes_[key], value) for (key, value) in _auc_dict.items())\n# _auc = roc_auc_score(y_test, y_pred, multi_class='ovr')\n# _fpr, _tpr, _thresholds = roc_curve(y_test, y_pred)\n\n with open('result/' + model_name + '/' + model_name + '_output.txt', 'w') as f:\n f.write(\"\\n---Confusion Matrix---\\n\")\n f.write(np.array2string(_confusion_matrix, separator=', '))\n f.write(\"\\n---Classification Report---\\n\")\n f.write(_classification_report)\n f.write(\"\\n---ROC AUC Score---\\n\")\n f.write(str(_auc_dict))\n# f.write(_auc)\n \n print('\\n-----Confusion Matrix-----\\n')\n print(_confusion_matrix)\n print('\\n-----Classification Report-----\\n')\n print(_classification_report)\n print('\\n-----AUC Dictionary-----\\n')\n print(str(_auc_dict))\n \n metrix = ['precision', 'recall', 'f1-score']\n# metrix = ['precision', 'recall', 'f1-score', 'support']\n xKeys = le.classes_\n for met in metrix:\n xValues = []\n for target_name in le.classes_:\n xValues += [_classification_report_dict[target_name][met]]\n\n pyplot.title(met)\n pyplot.bar(range(len(xValues)), list(xValues), align='center')\n pyplot.xticks(range(len(xKeys)), list(xKeys))\n pyplot.show()\n\n pyplot.title('AUC')\n pyplot.bar(range(len(_auc_dict)), list(_auc_dict.values()), align='center')\n pyplot.xticks(range(len(_auc_dict)), list(_auc_dict.keys()))\n pyplot.show()\n \n# # plot the roc curve for the model\n# # pyplot.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')\n# pyplot.plot(_fpr, _tpr, marker='.', label=model_name)\n# # axis labels\n# pyplot.xlabel('False Positive Rate')\n# pyplot.ylabel('True Positive Rate')\n# # show the legend\n# pyplot.legend()\n# # show the plot\n# pyplot.show()\n \n return _confusion_matrix, _classification_report, _auc_dict, _classification_report_dict", "def metrics(self):\n print(f\"\\nML_solver's supported metrics overview: \\n\")\n reg_metrics = [func.__name__ for func in metrics_dict.get(\"regression\")]\n clf_metrics = [\n func.__name__ for func in metrics_dict.get(\"classification\")\n ]\n\n df_metrics = (\n pd.DataFrame.from_dict(\n {\"regression\": reg_metrics, \"classification\": clf_metrics},\n orient=\"index\",\n )\n .transpose()\n .fillna(\"----\")\n )\n\n df_metrics = self._tableize(df_metrics)\n print(df_metrics)", "def get_multiclass_training_data():\n fname = \"data/dataset.csv\"\n dataframe = load_data(fname)\n dictionary = extract_dictionary(dataframe)\n X_train = generate_feature_matrix(dataframe, dictionary)\n Y_train = dataframe['label'].values.copy()\n\n return (X_train, Y_train, dictionary)", "def _create_classification_csv_files(self, filenames, num_lines):\n\n dict_counts = {\n 'red': 0,\n 'blue': 0,\n 'green': 0,\n '_all': 0,\n }\n dict_correct_counts = {\n 'red': 0,\n 'blue': 0,\n 'green': 0,\n '_all': 0,\n }\n index = 0\n for filename in filenames:\n 
full_file_name = os.path.join(self._test_dir, filename)\n with open(full_file_name, 'w') as f:\n writer = csv.writer(f)\n for r in range(num_lines):\n target = random.choice(['red', 'blue', 'green'])\n predicted = random.choice(['red', 'blue', 'green'])\n dict_counts[target] += 1\n dict_counts['_all'] += 1\n if target == predicted:\n dict_correct_counts[target] += 1\n dict_correct_counts['_all'] += 1\n index += 1\n writer.writerow([target, predicted, index])\n\n dict_accuracy = {\n 'red': float(dict_correct_counts['red']) / dict_counts['red'],\n 'blue': float(dict_correct_counts['blue']) / dict_counts['blue'],\n 'green': float(dict_correct_counts['green']) / dict_counts['green'],\n '_all': float(dict_correct_counts['_all']) / dict_counts['_all'],\n }\n return dict_counts, dict_accuracy", "def _train_all(names, classifiers,\r\n X, y, X_train, X_test, y_train, y_test,\r\n stats=True, predict=\"\"):\r\n ## ignore numpy warnings\r\n from warnings import filterwarnings\r\n filterwarnings('ignore')\r\n ## cycle around each classifier\r\n classes = {1:\"LIKELY\", -1:\"UNLIKELY\"}\r\n score = {1:0, -1:0}\r\n trusts = {}\r\n predictions = {}\r\n for name, classifier in zip(names, classifiers):\r\n ## train each classifier\r\n classifier.fit(X_train, y_train)\r\n if stats == True:\r\n _get_statistics(name, classifier, X, y, X_test, y_test)\r\n if predict != \"\":\r\n ## Make prediction\r\n prediction = classifier.predict(predict)[0]\r\n\r\n ## Increment counter for relevant score\r\n score[prediction] += 1\r\n predictions.update({name:prediction})\r\n \"\"\"\r\n reveal expected true negatives, false positives,\r\n false negatives, true positives\r\n \"\"\"\r\n tn, fp, fn, tp = c_m(y_test, classifier.predict(X_test)).ravel()\r\n ## trust is the amount of time that the prediction was correct\r\n trust_score = tp/(tp + fp) if prediction == 1 else tn/(tn + fn)\r\n trust_score = round((trust_score * 100), 2)\r\n trusts.update({name:trust_score})\r\n if predict != \"\":\r\n scores = pd.DataFrame({'Recurrence':predictions,\r\n 'Confidence':trusts})\r\n pred_weight = scores.Recurrence * scores.Confidence\r\n weights = pd.DataFrame({'Weights':pred_weight})\r\n scores['Recurrence'] = scores['Recurrence'].apply(lambda x: classes[x])\r\n print(scores)\r\n classification = 1 if weights.Weights.mean() > 0 else -1\r\n print(f\"\\nRecurrence judged {classes[classification]} at \\\r\n{round(abs(weights.Weights.mean()),2)} % confidence\")\r\n print(f\"Poll of classifiers results:\")\r\n for index in score:print(f\"{classes[index]}: \\t\\t{score[index]}\")", "def summarize_by_class(X, y):\n\n separated = separate_by_class(X, y)\n summaries = dict()\n\n for k in separated.keys():\n summaries[k] = summarize(np.array(separated[k]))\n\n return summaries", "def get_data():\n data, targets = make_classification(\n n_samples=1000,\n n_features=45,\n n_informative=12,\n n_redundant=7,\n random_state=134985745,\n )\n return data, targets", "def build_classifier():\n X = pd.read_csv(os.path.join(PROJECT_DIR, \"train_features.csv\"), skiprows=1, header=None).as_matrix()\n Y = pd.read_csv(os.path.join(PROJECT_DIR, \"train_labels.csv\"), header=None).as_matrix().ravel()\n\n # Split data into training and cross validation sets\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=3131)\n\n std_scale = preprocessing.StandardScaler().fit(X_train)\n X_train_std = std_scale.transform(X_train)\n # X_test_std = std_scale.transform(X_test)\n\n pca_std = PCA(n_components=13).fit(X_train_std)\n X_train_std 
= pca_std.transform(X_train_std)\n # X_test_std = pca_std.transform(X_test_std)\n\n clf = svm.SVC(C=5)\n clf.fit(X_train_std, y_train)\n\n # Compare predictions of classifier on cross-validation sets with ground-truths\n # print clf.score(X_test_std, y_test)\n return clf, std_scale, pca_std", "def main(): # Main function call #{{{\n\n my_file = 'training.csv'\n chi_squared_file='chisquared.csv'\n header = 0 # Turn this option on if there is a header in the csv being read!!!!\n class_label = 'Class'\n root = Tree()\n root.file_read(my_file, header)\n root.chi_squared_read(chi_squared_file)\n root.choose_comparator(class_label)\n root.file_write(\"output.dict\")\n # \n #my_file='training.csv'\n #my_file='photos.csv'\n #classifier='Class'\n #PROBABILITY='0.050'\n #root = Tree()\n #print root.chi_squared_headers\n #print root.chi_squared_data\n #root.write()\n #print root.chi_squared(classifier, PROBABILITY)\n #temp_classifier='Family'\n #print root.compute_max_information_gain(temp_classifier)\n #information_gain=root.information_gain(temp_classifier, 'Cartoon')\n #print (information_gain)\n #temp_classifier='Cartoon'\n #information_gain-=root.entropy(temp_classifier)\n #print (information_gain)\n #root.file_write(\"output.dict\")\n #my_file='altitude.csv'\n #my_file = 'photos.csv'\n #classifier='Family'\n #root.choose_comparator(class_label)\n #root.write()\n #temp_classifier = 'Family'\n #class_label = 'Class'\n #attribute = 'Direction'\n #print root.compute_max_information_gain(class_label)\n #print information_gain=root.information_gain(classifier, attribute)\n #print (information_gain)\n #temp_classifier='Cartoon'\n #information_gain-=root.entropy(temp_classifier)\n #print (information_gain)\n #root.file_write(\"output.dict\")\n #print '========================================'\n #print root.base_gini_index(class_label)\n print '========================================'\n #print root.attribute_impurity(class_label, attribute)\n #datum = {'key':'20000', 'value':'GCTGGGCCCTGGGCTTCTACCCTGCGGAGATCACACTGACCTGGCAGCGGGATGGCGAGG'}\n #print root.predict(class_label, datum)", "def get_classification_statistics(self, y_pred):\n misclassified = {}\n confused = {}\n indices_misclassified = np.flatnonzero(self.y_test != y_pred)\n for label in self.label_names.keys():\n # count how many points were misclassified in this class\n if sum(self.y_test==label)==0:\n misclassified[label] = 100.0\n else :\n misclassified[label] = np.round(1 - sum(self.y_test[indices_misclassified]==label)/sum(self.y_test==label),2)*100\n # find what is the class these points were the most classified as,\n # and the amount of this confusion\n lab_class = y_pred[np.flatnonzero(self.y_test==label)]\n lab_class = lab_class[lab_class != label]\n count_class = np.array([np.sum(lab_class==lab) for lab in self.label_names.keys()])\n confused_class = np.argmax(count_class)\n if len(lab_class)==0:\n confused[label] = [\"\", 0]\n else :\n confused[label] = [self.label_names[confused_class], np.round(count_class[confused_class]/len(lab_class),2)*100]\n\n d_name = max([len(self.label_names[label]) for label in self.label_names.keys()])+2\n f = \" - class {0:<%d} : {1:>5}%% correctly classified, else mainly confused with {2:>%d} (proportion : {3:>5}%%)\" % (d_name, d_name)\n\n print(\"\\nMisclassification statistics :\")\n for label in self.label_names.keys():\n print(f.format(\n \"'\"+self.label_names[label]+\"'\",\n str(misclassified.get(label, 0))[:5],\n \"'\"+str(confused.get(label,[\"\",0])[0])+\"'\",\n 
str(confused.get(label,[\"\",0])[1])[:5]))\n print(\"\")", "def report2dict(cr):\n tmp = list()\n class_data = defaultdict(dict)\n for row in cr.split(\"\\n\"):\n parsed_row = [x for x in row.split(\" \") if len(x) > 0]\n if len(parsed_row) > 0:\n tmp.append(parsed_row)\n measures = tmp[0]\n for row in tmp[1:]:\n class_label = row[0]\n for j, m in enumerate(measures):\n class_data[class_label][m.strip()] = float(row[j + 1].strip())\n return class_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets sizes for training and test sets
def _get_sizes(self) -> int: pass
[ "def test_size(self):\n return self.__test_batches * len(self.__sources)", "def _train_batch_sizes(self):\n for device in device_lib.list_local_devices():\n # TODO(b/141475121): We need some way to check which batch sizes would\n # work using a public API.\n if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':\n # Avoid OOM errors with larger batch sizes, which seem to cause errors\n # later on even if caught.\n #\n # TODO(allenl): Base this on device memory; memory limit information\n # during the test seems to exclude the amount TensorFlow has allocated,\n # which isn't useful.\n if 'K20' in device.physical_device_desc:\n return (16,)\n # Quardro P1000.\n if 'P1000' in device.physical_device_desc:\n return (16,)\n if 'P100' in device.physical_device_desc:\n return (16, 32, 64)\n\n if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':\n return (32,)\n return (16, 32)", "def target_sizes(self):\n return Counter(self.targets.values())", "def size(self):\n return sum([len(x) for x in self.computed_features.values()])", "def get_dataset_size(self):\n keys = self.get_keys()\n\n dataset_size = 0\n for key in keys:\n image = self.get_image(key)\n image.load_from_uri()\n dataset_size += image.get_blob().nbytes\n\n return dataset_size", "def part_sizes(self):\n\n return self._part_sizes", "def testcases_length(self):\n total = self.S(len(self.nodes), self.number_of_partitions)\n total *= len(self.target_nodes)\n total **= self.number_of_rounds\n return total", "def _score_batch_sizes(self) -> Iterable[int]:\n if get_batchnorm_modules(self.instance):\n return [self.batch_size]\n return [1, self.batch_size]", "def sizes(self):\n return np.array([entry.data[\"size\"] for entry in self._entries])", "def test_net_sizes():\n\n # Test 1\n net = neuralnet.FFNN([2, 2, 3, 3], 3, 3)\n expected_layers = [2, 2, 3, 3, 3]\n expected_weights = [[4, 4], [3, 3], [3, 3, 3], [4, 4, 4], [4, 4, 4]]\n expected_weight_number = 47\n\n test_net_size(net, expected_layers, expected_weights, expected_weight_number)\n\n # Test 2 - the size I've been playing with\n net = neuralnet.FFNN([18], 31, 4)\n expected_layers = [18, 4]\n # 18 32s, 4 19s\n expected_weights = [[32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32],\n [19, 19, 19, 19]]\n expected_weight_number = 652\n\n test_net_size(net, expected_layers, expected_weights, expected_weight_number)\n\n # Test 3 - The size of the easy way to play\n net = neuralnet.FFNN([13], 21, 5)\n expected_layers = [13, 5]\n # 13 22s, 5 14s\n expected_weights = [[22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22],\n [14, 14, 14, 14, 14]]\n expected_weight_number = 356\n\n test_net_size(net, expected_layers, expected_weights, expected_weight_number)", "def calculate_train_size():\n\timport os\n\t# projs = [\"Apache\", \"Hipacc\", \"SQL\", \"rs-6d-c3-obj1\", \"wc-6d-c1-obj1\"]\n\t# numeric projects\n\tprojs1 = ['rs-6d-c3-obj1', 'rs-6d-c3-obj2', 'sol-6d-c2-obj1', 'sol-6d-c2-obj2', 'wc+rs-3d-c4-obj1', 'wc+rs-3d-c4-obj2', 'wc+sol-3d-c4-obj1', 'wc+sol-3d-c4-obj2', 'wc+wc-3d-c4-obj1', 'wc+wc-3d-c4-obj2', 'wc-3d-c4-obj1', 'wc-3d-c4-obj2', 'wc-5d-c5-obj1', 'wc-5d-c5-obj2', 'wc-6d-c1-obj1', 'wc-6d-c1-obj2', 'wc-c1-3d-c1-obj1', 'wc-c1-3d-c1-obj2', 'wc-c3-3d-c1-obj1', 'wc-c3-3d-c1-obj2']\n\t# boolean projects\n\tprojs2 = ['AJStats', 'Apache', 'BerkeleyC', 'BerkeleyJ', 'clasp', 'Dune', 'Hipacc', 'HSMGP_num', 'LLVM', 'lrzip', 'sac', 'spear', 'SQL', 'WGet', 'x264', 'XZ']\n\n\tave_sub_train = []\n\tprojs = projs1 + projs2\n\tfor proj in projs:\n\t\troot_dir = 
\"../experiment/parse_data/sub_train/\" + proj + \"/\"\n\t\tlst_name = [i for i in os.listdir(root_dir)]\n\t\ttrain_set = []\n\t\tfor csv in lst_name:\n\t\t\tpdcontent = pd.read_csv(root_dir + csv)\n\t\t\ttrain_set.append(len(pdcontent))\n\t\tprint(train_set, \":\", np.mean(train_set))\n\t\t# print(np.mean(train_set))\n\t\tave_sub_train.append(np.mean(train_set))\n\tprint(ave_sub_train)\n\t#[39.100000000000001, 38.280000000000001, 37.219999999999999, 45.140000000000001, 26.84, 27.52, 24.82, 28.359999999999999, 28.039999999999999, 24.66, 34.0, 33.700000000000003, 38.18, 39.299999999999997, 34.579999999999998, 41.280000000000001, 34.280000000000001, 38.039999999999999, 35.280000000000001, 33.619999999999997, 26.859999999999999, 27.699999999999999, 57.840000000000003, 26.48, 39.939999999999998, 29.960000000000001, 26.620000000000001, 33.219999999999999, 39.799999999999997, 33.5, 31.219999999999999, 34.359999999999999, 29.84, 27.559999999999999, 42.740000000000002, 30.440000000000001]\n\treturn ave_sub_train", "def numSets(self):\n return self.sets", "def test_partition_sizes(self):\n assert self.state.partition_sizes == (3, 4, 5, 6, 7, 8, 9)", "def get_batch_size(self):\n pass", "def embeddings_size(self) -> int:", "def get_art_sizes( self ):\n\n return self.art_fields[\"sizes\"]", "def n_sets(self):\n return self._sets_count()", "def runnersizes(self):\n result = []\n for runner in self.runners: result.append(\"%s - %s\" % (runner.queue.qsize(), runner.name))\n return result", "def hlm_sizes_in_use(self) -> Set[int]:\n\n sizes = set()\n for component in self._template:\n if not isinstance(component, shrike.template.HeapManip):\n continue\n sizes.update(component.sizes)\n\n return sizes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return substrings of length n in both a and b
def substrings(a, b, n):
    # TODO
    la = len(a)
    lb = len(b)
    sub_a = []
    sub_b = []
    sub = []
    for i in range(la-n+1):
        sub_a.append(a[i:i+n])
    for j in range(lb-n+1):
        sub_b.append(b[j:j+n])
    for k in sub_a:
        if k in sub_b:
            sub.append(k)
    sub = set(sub)
    return sub
[ "def substrings(a, b, n):\n\n result_a = [a[i:i+n] for i in range(len(a))]\n result_b = [b[i:i+n] for i in range(len(b))]\n\n return compare_lists(result_a, result_b)", "def getSubstrings(a, n):\n\n # Get substrings from string\n substrings = set()\n for i in range(0, len(a) - n + 1):\n substrings.add(a[i:i + n])\n\n return substrings", "def LCStr(a,b):\n L = np.zeros((len(a), len(b)))\n z = 0 # Use to denote the max element in L\n ret = [] # All the common sub-string with longest length will store in ret\n for i in xrange(0,len(a)):\n for j in xrange(0,len(b)):\n if a[i] == b[j]:\n if i==0 or j==0:\n L[i][j] = 1\n else:\n L[i][j] = L[i-1][j-1] + 1\n if L[i][j] > z:\n z = L[i][j]\n ret = []\n if L[i][j] == z:\n ret.append(a[int(i-z+1):i+1])\n else:\n L[i][j] = 0\n return ret", "def get_substrings(text):\n return [text[x:y] for x, y in combinations(range(len(text) + 1), r=2)]", "def split_in_substrings(s, n):\n\n output = []\n\n # for each line in s\n for line_in_s in s:\n # split the line in substrings of size n\n for i in range(len(line_in_s) - n + 1):\n output.append(line_in_s[i:i + n])\n\n return output", "def substring_split(string, n):\n\n substrings = []\n for i in range(len(string) - n + 1):\n substrings.append(string[i:i + n])\n\n return substrings", "def subs(s, count):\r\n return [s[i:(i + count)] for i in range(len(s) - count + 1)]", "def substrary(self, index1, index2):\n Tempstr = []\n ans = \"\"\n IEnd = self.lengthary()\n if (index1 < 1) or (index1 > index2) or (index2 > IEnd):\n return ans\n if ((index2 - index1) + 1) > pratipadmax:\n return ans\n\n I = index1\n while I <= index2:\n Tempstr.append(self.Linary[I])\n I += 1\n\n ans = \"\".join(Tempstr)\n return ans", "def substrings(s, minlength=30):\n maxsize = current = len(s)\n result = []\n while current >= minlength:\n result.extend([s[start:start+current] \n for start in range(maxsize-current+1)])\n # range(5) is [0,1,2,3,4]\n current -= 1\n return set(result) # set() removes duplicates", "def sliced (s, n):\n result = [s[0+i:n+i] for i in range(0, len(s), n)]\n # if last sliced lenght less than n, then add zeros to last sliced until the lenght equal with n\n if len(result[-1]) < n:\n less = n-len(result[-1])\n zeros = ''\n for i in range(less):\n zeros = zeros + '0'\n result[-1] = result[-1]+zeros\n return result", "def substrings_of_length(length, string):\n # You could also use a generator here, but I don't want to overcomplicate\n # things.\n substrings = []\n for i in range(len(string) - length):\n substrings.append(string[i : i + length])\n return substrings", "def split_subsequences(iterable, length=2, overlap=0, \r\n join_substr=True):\r\n isstring = isinstance(iterable, str) and join_substr\r\n it = iter(iterable)\r\n results = list(itertools.islice(it, length))\r\n while len(results) == length:\r\n yield ''.join(results) if isstring else results\r\n results = results[length - overlap:]\r\n results.extend(itertools.islice(it, length - overlap))\r\n if results:\r\n yield ''.join(results) if isstring else results", "def check_substring_with_fixed_len(s: str, t: str, l: int, hs_1: List[int], ht_1: List[int],\n hs_2: List[int], ht_2: List[int], x: int, p1: int, p2: int) -> Answer:\n xl_1 = hash_power_x(x, p1, l)\n xl_2 = hash_power_x(x, p2, l)\n\n s_dict = {} # key: tuple of two hash values, value: substring start index.\n\n for i in range(len(s) - l + 1):\n h1 = (hs_1[i+l] - xl_1 * hs_1[i]) % p1\n h2 = (hs_2[i+l] - xl_2 * hs_2[i]) % p2\n s_dict[(h1, h2)] = i\n\n for j in range(len(t) - l + 1):\n h1 = (ht_1[j+l] 
- xl_1 * ht_1[j]) % p1\n h2 = (ht_2[j+l] - xl_2 * ht_2[j]) % p2\n key = (h1, h2)\n\n if key in s_dict:\n i = s_dict[key]\n return Answer(i, j, l)\n\n return Answer(i, j, 0)", "def find_substrs12_endchars(sidestr,mainstr,substr1,substr2,delay1=0,delay2=0):\n ## don't use regular expressions re module, which finds only non-overlapping matches\n ## we want to find overlapping matches too.\n substr2len = len(substr2)\n substr1len = len(substr1)\n abs_idx1 = 0 ## mainstr is getting chopped, but we maintain abs index on sidestr\n while True:\n idx2 = mainstr.find(substr2)\n ## find returns -1 if substr2 not found\n if idx2 != -1:\n endcharidx2 = idx2+substr2len+delay2\n ### NOTE: abs_startidx1 is one earlier than definition!!! I think necessary for causality.\n ## put +1 below to switch to definition in Quinn et al 2010\n abs_startidx1 = abs_idx1 + endcharidx2 - substr1len-delay1\n if endcharidx2<len(mainstr): # mainstr Y has characters left?\n if abs_startidx1 >= 0: # sidestr X has sufficient chars before?\n ## sidestr has substr1 before the char to be returned? and mainstr is not over\n ## IMP: below if's first term is the only place directed info enters.\n ## Remove first term below and you get just the entropy of mainstr Y: VERIFIED.\n #print sidestr[abs_startidx1:abs_startidx1+substr1len], substr1, abs_startidx1\n if sidestr[abs_startidx1:abs_startidx1+substr1len]==substr1:\n yield mainstr[endcharidx2]\n else: # reached end of string\n break\n ## chop the mainstr just after the start of substr2,\n ## not after the end, as we want overlapping strings also\n mainstr = mainstr[idx2+1:]\n ## don't chop sidestr as substr1len may be greater than substr2len\n ## in the next iteration, idx2 will be relative, but for sidestr we maintain abs_idx1\n abs_idx1 += idx2+1\n else: # substr2 not found\n break", "def substring(s):\n\n\ts=s.lower()\n\tn=len(s)\n\tss=[] #substring set\n\tlongest=1\n\tlongesti=0\n\tlongestj=0\n\n\ti=0\n\twhile(i<n):\n\t\t# if the remainder of the string to be evaluated is \n\t\t# less than the longest found so far, we are done.\n\t\tif n-i<longest:\n\t\t\tbreak\t\t\n\n\t\tss=[]\n\n\t\tfor j in range(i,n):\n\t\t\tif s[j] not in ss:\n\t\t\t\tss.append(s[j])\n\t\t\telse:\n\t\t\t\tif len(ss)>longest:\n\t\t\t\t\tlongest=len(ss)\n\t\t\t\t\tlongesti=i\n\t\t\t\t\tlongestj=j\n\t\t\t\t# shortcut to reduce substrings we need to test\t\n\t\t\t\ti += ss.index(s[j])\n\t\t\t\tbreak\n\t\ti += 1\n\tprint(s[longesti:longestj])\n\treturn longest", "def all_substrings(strings, minlength=30):\n result = set()\n for s in strings:\n result |= substrings(s, minlength)\n # \"|=\" is the set union operator\n return result", "def n_swaps(word_a: str, word_b: str, n: int) -> frozenset:\n if n <= 0:\n return frozenset()\n swaps = set()\n # Swap cartesian product of n letters in each word.\n # E.g. 
[(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]\n for swap_counts in itertools.product(range(n + 1), range(n + 1)):\n swap_a = swap_counts[0]\n swap_b = swap_counts[1]\n swaps.add(\n frozenset((\n word_a[:swap_a] + word_b[swap_b:],\n word_b[:swap_b] + word_a[swap_a:],\n ))\n )\n swaps.add(\n frozenset((\n word_b[:swap_a] + word_a[swap_b:],\n word_a[:swap_b] + word_b[swap_a:],\n ))\n )\n return frozenset(swaps)", "def getWordsWithSameStart(word, wordList, n):\n wordst=[]\n\n for name in wordList:\n \n if name[:n]==word[:n]:\n wordst.append(name)#appends words with first n letters\n \n return wordst", "def getSubString(self, startidx: 'int', endidx: 'int'=-1) -> \"SbString\":\n return _coin.SbString_getSubString(self, startidx, endidx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtain a function signature, for Python 2. This analyzes the function argument specification and constructs the argument order, required set of arguments, and optional set of arguments.
def _getsig(func):
    # Get the argument specification
    argspec = inspect.getargspec(func)
    order = argspec.args[:]

    # Figure out how many are optional
    if argspec.defaults:
        defcnt = -len(argspec.defaults)
        required = set(order[:defcnt])
        optional = set(order[defcnt:])
    else:
        required = set(order)
        optional = set()

    return (
        order,
        required,
        optional,
        argspec.varargs is not None,
        argspec.keywords is not None,
    )
[ "def trampoline_signature(fn):\n\n # TODO: operator overloads\n names = []\n\n if fn[\"const\"]:\n names.append(\"K\")\n refqual = fn[\"ref_qualifiers\"]\n if refqual:\n if refqual == \"&\":\n names.append(\"R\")\n if refqual == \"&&\":\n names.append(\"O\")\n\n names.append(fn[\"name\"])\n\n params = fn[\"parameters\"]\n if not params:\n names.append(\"_v\")\n else:\n for p in params:\n names.append(\"_\")\n names.append(_encode_type(p))\n\n if fn[\"vararg\"]:\n names.append(\"_z\")\n\n return \"\".join(names)", "def _signature_info(arg_spec):\n # type: (ArgSpec) -> Tuple[List[str], List[str], bool, bool]\n if arg_spec.defaults:\n n_defaults = len(arg_spec.defaults)\n def_args = arg_spec.args[-n_defaults:]\n req_args = arg_spec.args[:-n_defaults]\n else:\n req_args = arg_spec.args\n def_args = []\n return req_args, def_args, bool(arg_spec.varargs), bool(arg_spec.keywords)", "def _get_arg_names(func):\n # noinspection PyUnresolvedReferences\n return (\n [arg for arg in inspect.getargspec(func=func).args] if six.PY2 else\n list(inspect.signature(obj=func).parameters.keys())\n )", "def _inspect_getargspec(fn):\n try:\n return inspect.getfullargspec(fn)\n except AttributeError:\n try:\n return inspect.getargspec(fn)\n except TypeError:\n return inspect.getargspec(fn.__call__)", "def _build_func_sig(func_name, arg_dict, return_type):\n\n if 'Create' in func_name:\n # don't pass in any argument to creation functions\n return \"def %s():\\n\" % func_name\n\n if ('Get' in func_name) and (return_type == 'cusparseStatus_t') and \\\n len(arg_dict) == 2:\n basic_getter = True\n else:\n basic_getter = False\n\n sig = \"def %s(\" % func_name\n for k, v in arg_dict.items():\n is_ptr = '*' in v\n if is_ptr and basic_getter:\n continue\n sig += k + \", \"\n sig = sig[:-2] + \"):\\n\"\n sig = _remove_comment(sig)\n # wrap to 2nd line if too long\n return split_line(sig, break_pattern=', ', nmax=79)", "def signature_from_ast(node: ast.FunctionDef, bound_method: bool,\n type_comment: ast.FunctionDef) -> Signature:\n params = []\n for arg in node.args.posonlyargs:\n param = Parameter(arg.arg, Parameter.POSITIONAL_ONLY, annotation=arg.type_comment)\n params.append(param)\n\n for arg in node.args.args:\n param = Parameter(arg.arg, Parameter.POSITIONAL_OR_KEYWORD,\n annotation=arg.type_comment or Parameter.empty)\n params.append(param)\n\n if node.args.vararg:\n param = Parameter(node.args.vararg.arg, Parameter.VAR_POSITIONAL,\n annotation=node.args.vararg.type_comment or Parameter.empty)\n params.append(param)\n\n for arg in node.args.kwonlyargs:\n param = Parameter(arg.arg, Parameter.KEYWORD_ONLY,\n annotation=arg.type_comment or Parameter.empty)\n params.append(param)\n\n if node.args.kwarg:\n param = Parameter(node.args.kwarg.arg, Parameter.VAR_KEYWORD,\n annotation=node.args.kwarg.type_comment or Parameter.empty)\n params.append(param)\n\n # Remove first parameter when *obj* is bound_method\n if bound_method and params:\n params.pop(0)\n\n # merge type_comment into signature\n if not_suppressed(type_comment.argtypes): # type: ignore[attr-defined]\n for i, param in enumerate(params):\n params[i] = param.replace(\n annotation=type_comment.argtypes[i]) # type: ignore[attr-defined]\n\n if node.returns:\n return Signature(params, return_annotation=node.returns)\n elif type_comment.returns:\n return Signature(params, return_annotation=ast_unparse(type_comment.returns))\n else:\n return Signature(params)", "def parse_signature(argnames, args, kwargs):\n # Parse signature\n i = 0\n for argname in argnames:\n if 
argname not in kwargs:\n kwargs[argname] = args[i]\n i += 1\n if i != len(args): # Not all arguments were parsed\n raise TypeError(\"superfluous arguments in signature of \"\n \"`{cls.__qualname__}.__new__`.\")\n return (kwargs[k] for k in argnames)", "def func_sig(func):\n if hasattr(func,'im_func'):\n # func is a method\n func = func.__func__\n code = func.__code__\n fname = code.co_name\n callargs = code.co_argcount\n # XXX Uses hard coded values taken from Include/compile.h\n args = list(code.co_varnames[:callargs])\n if func.__defaults__:\n i = len(args) - len(func.__defaults__)\n for default in func.__defaults__:\n try:\n r = repr(default)\n except:\n r = '<repr-error>'\n if len(r) > 100:\n r = r[:100] + '...'\n arg = args[i]\n if arg[0] == '.':\n # anonymous arguments\n arg = '(...)'\n args[i] = '%s=%s' % (arg,r)\n i = i + 1\n if code.co_flags & 0x0004: # CO_VARARGS\n args.append('*'+code.co_varnames[callargs])\n callargs = callargs + 1\n if code.co_flags & 0x0008: # CO_VARKEYWORDS\n args.append('**'+code.co_varnames[callargs])\n callargs = callargs + 1\n return '%s(%s)' % (fname,string.join(args,', '))", "def _get_dup_signature_tuples(fn: Callable) -> List[Tuple[str, str]]:\n sig_tuples: List[Tuple[str, str]] = []\n for param in inspect.signature(inspect.unwrap(fn)).parameters:\n sig_tuples.append((param, param))\n return sig_tuples", "def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> \"Signature[nodes.Argument]\":\n # For all dunder methods other than __init__, just assume all args are positional-only\n assume_positional_only = is_dunder(stub.name, exclude_init=True)\n\n all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]\n for func in map(_resolve_funcitem_from_decorator, stub.items):\n assert func is not None\n for index, arg in enumerate(func.arguments):\n # For positional-only args, we allow overloads to have different names for the same\n # argument. 
To accomplish this, we just make up a fake index-based name.\n name = (\n \"__{}\".format(index)\n if arg.variable.name.startswith(\"__\") or assume_positional_only\n else arg.variable.name\n )\n all_args.setdefault(name, []).append((arg, index))\n\n def get_position(arg_name: str) -> int:\n # We just need this to return the positional args in the correct order.\n return max(index for _, index in all_args[arg_name])\n\n def get_type(arg_name: str) -> mypy.types.ProperType:\n with mypy.state.strict_optional_set(True):\n all_types = [\n arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]\n ]\n return mypy.typeops.make_simplified_union([t for t in all_types if t])\n\n def get_kind(arg_name: str) -> int:\n kinds = {arg.kind for arg, _ in all_args[arg_name]}\n if nodes.ARG_STAR in kinds:\n return nodes.ARG_STAR\n if nodes.ARG_STAR2 in kinds:\n return nodes.ARG_STAR2\n # The logic here is based on two tenets:\n # 1) If an arg is ever optional (or unspecified), it is optional\n # 2) If an arg is ever positional, it is positional\n is_opt = (\n len(all_args[arg_name]) < len(stub.items)\n or nodes.ARG_OPT in kinds\n or nodes.ARG_NAMED_OPT in kinds\n )\n is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds\n if is_opt:\n return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT\n return nodes.ARG_POS if is_pos else nodes.ARG_NAMED\n\n sig = Signature() # type: Signature[nodes.Argument]\n for arg_name in sorted(all_args, key=get_position):\n # example_arg_name gives us a real name (in case we had a fake index-based name)\n example_arg_name = all_args[arg_name][0][0].variable.name\n arg = nodes.Argument(\n nodes.Var(example_arg_name, get_type(arg_name)),\n type_annotation=None,\n initializer=None,\n kind=get_kind(arg_name),\n )\n if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):\n sig.pos.append(arg)\n elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):\n sig.kwonly[arg.variable.name] = arg\n elif arg.kind == nodes.ARG_STAR:\n sig.varpos = arg\n elif arg.kind == nodes.ARG_STAR2:\n sig.varkw = arg\n else:\n raise AssertionError\n return sig", "def __retrieve_event_signature(function_name, parameters) -> str:\n type_names: List[str] = []\n for i, param in enumerate(parameters):\n if i > 0:\n # If there's no hint of argument in the function declaration,\n # raise an exception\n if param.annotation is Parameter.empty:\n raise IllegalFormatException(\n f\"Missing argument hint for '{function_name}': '{param.name}'\")\n\n main_type = None\n if isinstance(param.annotation, type):\n main_type = param.annotation\n elif param.annotation == 'Address':\n main_type = Address\n\n # Raises an exception if the types are not supported\n if main_type is None or not issubclass(main_type, BaseType.__constraints__):\n raise IllegalFormatException(\n f\"Unsupported type for '{param.name}: {param.annotation}'\")\n\n type_names.append(str(main_type.__name__))\n return f\"{function_name}({','.join(type_names)})\"", "def kw_and_pos_args_from_func(func):", "def abi_function_signature(self):\n signature = \"{func_name}({arg_types})\".format(\n func_name=self.name,\n arg_types=','.join(self.input_types),\n )\n return ethereum_utils.big_endian_to_int(ethereum_utils.sha3(signature)[:4])", "def enforce_signature(function):\n argspec = inspect.getfullargspec(function)\n annotations = argspec.annotations\n argnames = argspec.args\n\n unnamed_annotations = {}\n for i, arg in enumerate(argnames):\n if arg in annotations:\n unnamed_annotations[i] = (annotations[arg], arg)\n\n @wraps(function)\n def 
decorated(*args, **kwargs):\n for i, annotation in unnamed_annotations.items():\n if i < len(args):\n assert_right_type(args[i], annotation[0], annotation[1])\n\n for argname, argval in kwargs.items():\n if argname in annotations:\n assert_right_type(argval, annotations[argname], argname)\n\n return function(*args, **kwargs)\n\n return decorated", "def parseFunction(node):\n \n args = []\n for arg in node.args.args:\n if isinstance(arg, ast.Name):\n if arg.id != \"self\":\n args.append(arg.id)\n elif isinstance(arg, ast.Tuple):\n args.append(getValue(arg))\n else:\n print arg\n \n offset = 1\n for default in reversed(node.args.defaults):\n args[-offset] += \"=%s\" % getValue(default)\n offset += 1\n \n if (node.args.vararg):\n args.append(\"*%s\" % node.args.vararg)\n if (node.args.kwarg):\n args.append(\"**%s\" % node.args.kwarg)\n \n docstring = ast.get_docstring(node, True)\n \n return (args, docstring or \"\")", "def generate_sig(func: Func, *params: Parameter) -> Signature:\n return signature(func).replace(\n parameters=list(signature(func).parameters.values()) + list(params)\n )", "def get_kinds_from_signature(signature: Signature) -> tuple:\n return tuple([p.kind for p in signature.parameters.values()])", "def python_signature(o):\n\n if not callable(o):\n return None\n\n # If this is pure-python, then just inspect the signature.\n try:\n sig = inspect.signature(o)\n return (str(sig))\n except Exception:\n pass\n\n # Otherwise, look at the docstring.\n s = getattr(o, \"__doc__\", \"\")\n\n renpy.game.script.all_pyexpr = [ ]\n\n\n s = s.split(\"\\n\\n\")[0]\n\n if \"(\" not in s:\n return None\n\n if \")\" not in s:\n return None\n\n s = s.replace(\"-> void\", \"\")\n\n lines = renpy.parser.list_logical_lines('<test>', s, 1, add_lines=True)\n nested = renpy.parser.group_logical_lines(lines)\n\n l = renpy.parser.Lexer(nested)\n l.advance()\n\n l.word()\n while l.match(r\"\\.\"):\n l.word()\n\n rv = \"\"\n\n def consume(pattern):\n nonlocal rv\n m = l.match(pattern)\n if m:\n rv += m\n\n return m\n\n consume(r'\\(')\n\n first = True\n\n while True:\n consume(\",\")\n\n if consume(r'\\)'):\n break\n\n consume(r'\\**')\n\n if not first:\n rv += \" \"\n else:\n first = False\n\n argname = l.word()\n\n while True:\n n = l.word()\n if n is not None:\n argname = n\n else:\n break\n\n rv += argname # type: ignore\n rv += l.delimited_python(\",)\")\n\n rv += \" \"\n rv += l.rest()\n\n return rv", "def _parse_function_doc(func):\r\n # TODO: things like utime(path, (atime, mtime)) and a(b [, b]) -> None\r\n doc = inspect.getdoc(func)\r\n\r\n if doc is None:\r\n return '', 'pass'\r\n\r\n # get full string, parse round parentheses: def func(a, (b,c))\r\n try:\r\n count = 0\r\n debug.dbg(func, func.__name__, doc)\r\n start = doc.index('(')\r\n for i, s in enumerate(doc[start:]):\r\n if s == '(':\r\n count += 1\r\n elif s == ')':\r\n count -= 1\r\n if count == 0:\r\n end = start + i\r\n break\r\n param_str = doc[start + 1:end]\r\n except (ValueError, UnboundLocalError, AttributeError):\r\n # ValueError for doc.index\r\n # UnboundLocalError for undefined end in last line\r\n debug.dbg('no brackets found - no param')\r\n end = 0\r\n param_str = ''\r\n else:\r\n # remove square brackets, that show an optional param ( = None)\r\n def change_options(m):\r\n args = m.group(1).split(',')\r\n for i, a in enumerate(args):\r\n if a and '=' not in a:\r\n args[i] += '=None'\r\n return ','.join(args)\r\n\r\n while True:\r\n param_str, changes = re.subn(r' ?\\[([^\\[\\]]+)\\]',\r\n change_options, param_str)\r\n 
if changes == 0:\r\n break\r\n param_str = param_str.replace('-', '_') # see: isinstance.__doc__\r\n\r\n # parse return value\r\n r = re.search('-[>-]* ', doc[end:end + 7])\r\n if r is None:\r\n ret = ''\r\n else:\r\n index = end + r.end()\r\n # get result type, which can contain newlines\r\n pattern = re.compile(r'(,\\n|[^\\n-])+')\r\n ret_str = pattern.match(doc, index).group(0).strip()\r\n # New object -> object()\r\n ret_str = re.sub(r'[nN]ew (.*)', r'\\1()', ret_str)\r\n\r\n ret = BuiltinModule.map_types.get(ret_str, ret_str)\r\n if ret == ret_str and ret not in ['None', 'object', 'tuple', 'set']:\r\n debug.dbg('not working', ret_str)\r\n\r\n ret = ('return ' if 'return' not in ret else '') + ret\r\n return param_str, ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize an ``InjectorCleanup`` instance.
def __init__(self, injector):
    self.injector = injector
    self.keep = None
[ "def initialize(cls, io_loop=None):\r\n if cls._initialized:\r\n return\r\n if io_loop is None:\r\n io_loop = ioloop.IOLoop.current()\r\n cls._old_sigchld = signal.signal(\r\n signal.SIGCHLD,\r\n lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))\r\n cls._initialized = True", "def init_tetanus_lib(self) -> None:\n\n # Can't set instance attributes in fixture with scope='class', only class attributes.\n cls = type(self)\n cls._tetanus_lib = Tetanus() # pylint: disable=protected-access\n cls._echo_port = 1337 # pylint: disable=protected-access", "def _make_finalizer(self):\n overloads = self.overloads\n targetctx = self.targetctx\n # Early-bind utils.shutting_down() into the function's local namespace\n # (see issue #689)\n def finalizer(shutting_down=utils.shutting_down):\n # The finalizer may crash at shutdown, skip it (resources\n # will be cleared by the process exiting, anyway).\n if shutting_down():\n return\n # This function must *not* hold any reference to self:\n # we take care to bind the necessary objects in the closure.\n for func in overloads.values():\n try:\n targetctx.remove_user_function(func)\n targetctx.remove_native_function(func)\n except KeyError:\n # Not a native function (object mode presumably)\n pass\n\n return finalizer", "def _init_logger(self):\n #self._logger = logger_factory.make_logger(__name__)", "def injector(self, injector: Injector):\n self._injector = injector", "def __init__(self, services_packages=None):\n\n self._services = {}\n\n if not services_packages:\n services_packages = settings.SERVICE_DIRS\n self._import_services(services_packages)\n self._register_services(Service)", "def register_cleanup(self, shutdown):\n self.cleanups.insert(0,shutdown)", "def initialize(self):\n try:\n api_key = self._pomodoro_service.get_config(\"task.asana\", \"api_key\")\n self.asana_api = self._get_asana_api(api_key)\n except Exception as ex:\n logger.error(\"Error initializing plugin: {0}\".format(ex))", "def __init__(self, __container=None, **inject_kwargs):\n self._inject_kwargs = inject_kwargs\n self._container = __container", "def initialize_plugin(self) -> None:\n pass", "def __shared_initialize__(self, **kwargs):", "def __init__(self):\n if LogManager.__instance:\n return\n LogManager.__instance = self\n self.configure()", "def uninit():\n sys.meta_path.remove(_importer)", "async def build_context(self) -> InjectionContext:\n context = InjectionContext(settings=self.settings)\n context.settings.set_default(\"default_label\", \"Aries Cloud Agent\")\n\n if context.settings.get(\"timing.enabled\"):\n timing_log = context.settings.get(\"timing.log_file\")\n collector = Collector(log_path=timing_log)\n context.injector.bind_instance(Collector, collector)\n\n # Shared in-memory cache\n context.injector.bind_instance(BaseCache, InMemoryCache())\n\n # Global protocol registry\n context.injector.bind_instance(ProtocolRegistry, ProtocolRegistry())\n\n # Global goal code registry\n context.injector.bind_instance(GoalCodeRegistry, GoalCodeRegistry())\n\n # Global event bus\n context.injector.bind_instance(EventBus, EventBus())\n\n # Global did resolver\n context.injector.bind_instance(DIDResolver, DIDResolver([]))\n context.injector.bind_instance(DIDMethods, DIDMethods())\n context.injector.bind_instance(KeyTypes, KeyTypes())\n context.injector.bind_instance(\n BaseVerificationKeyStrategy, DefaultVerificationKeyStrategy()\n )\n\n await self.bind_providers(context)\n await self.load_plugins(context)\n\n # Set DIDComm prefix\n 
DIDCommPrefix.set(context.settings)\n\n return context", "def __init__(self):\n\t\t\n\t\tif (TranslatorInstance.__api == None):\n\n\t\t\tTranslatorInstance.__api = Translator()", "def __init__(self):\n super(TNL3ServicePlugin, self).__init__()\n self._tn_info = None\n # self._driver = None\n self.task_manager = tasks.TaskManager()\n self.task_manager.start()\n self.tn_init()", "def setUp(self):\n self.addCleanup(setattr, util, '_idFunction', util._idFunction)", "def __del__(self):\n self.cleanup()", "def deinit(self):\n _removeHandler(self.logger, self.logHandler)\n with lock_file_log:\n self.fd.close()\n self.logger = None\n self.logHandler = None\n self.fd = None\n self.filePath = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exit the context manager. This deletes all keys added to the injector since the context manager was entered. The exception, if any, is not handled.
def __exit__(self, exc_type, exc_value, exc_tb):
    # Must not be None
    assert self.keep is not None

    # Delete the added keys
    for key in self.injector._keys - self.keep:
        del self.injector[key]

    # Reset keep
    self.keep = None

    return None
[ "def __exit__(self, exc_type, exc_val, exc_tb):\n # Try to clean up what we can before exiting.\n for rg in self.redundancy_groups:\n while rg:\n try:\n self.pop_and_cleanup_route_reflector(rg)\n except KeyboardInterrupt:\n raise\n except Exception:\n pass", "def clean_context() -> t.Generator[contextlib.ExitStack, None, None]:\n stack = contextlib.ExitStack()\n yield stack\n stack.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n GlobalOGLContextStack.pop_current() # removing self\n # return binding\n last = GlobalOGLContextStack.get_current()\n if last and last is self:\n pass\n else:\n if last:\n with last.manager.glfw as glfw_window:\n glfw.make_context_current(glfw_window)\n else:\n glfw.make_context_current(None)\n ContextCounter.checkout(self)", "def __exit__(self, type, value, traceback):\n logger.info('Exiting slack_bot')", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.database_handle.close_databases()", "def _destroy(self):\n self._dict._destroy()", "def close(self):\n self.env = None", "def __exit__(self, *args: Any) -> None:\n self.close()", "def destroy_context():\n context = libca.ca_current_context()\n if context != ffi.NULL and context in __exception_callback:\n del __exception_callback[context]\n\n libca.ca_context_destroy()", "def __exit__(self, exc_type, exc_value, traceback):\n self.stop()", "def end(self):\n\n # Stop all the plugins...\n for obj in self.__objList:\n stop = getattr(obj[0], 'stop', None)\n if isinstance(stop, types.MethodType):\n stop()\n\n # Destroy the database...\n for obj in self.__objList:\n inst = obj[0]\n name = obj[1]\n destroy = getattr(inst, 'destroy', None)\n if isinstance(destroy, types.MethodType):\n ret = destroy()\n if isinstance(ret, types.GeneratorType):\n for blah in ret:\n pass\n\n # Die...\n sys.exit()", "def __exit__(self, *args):\r\n\t\tself.io_buffer.close()", "def __clean__(self):\n if self.os_session:\n keystone_utils.close_session(self.os_session)", "def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self.unregister(\n names=(set(self.tables()) - self._tables_scope_stack.pop()),\n )", "def exit(self, save=True):\n\t\tif save:\n\t\t\tself.connection.commit()\n\t\t\tif self.changed_contexts:\n\t\t\t\tc = self.connection.cursor()\n\t\t\t\tc.execute(\"\"\"\n\t\t\t\t\tSELECT DISTINCT path FROM Context\n\t\t\t\t\tORDER BY path\n\t\t\t\t\"\"\")\n\t\t\t\tdata_ctx = op.join(DATA_DIR, DATA_CTX_NAME)\n\t\t\t\twith open(data_ctx, 'w') as ctx_file:\n\t\t\t\t\tfor row in c:\n\t\t\t\t\t\tctx = userify_context(row[0])\n\t\t\t\t\t\tctx_file.write(ctx + '\\n')\n\t\tself.connection.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self._raster.close()\n del self.__dict__['_tmp_access_kwargs']\n del self.__dict__['_meta']\n del self.__dict__['_raster']", "def __del__(self):\n self.cleanup()", "def __exit__(self, *args):\n if self.imap is not None:\n try:\n self.imap.close_folder()\n except:\n pass\n self.imap.logout()", "def finalizer():\n\n for instance in instances:\n try:\n instance.get()\n except CommandFailed:\n log.warning(\"Pool is already deleted\")\n continue\n blockpool_ui_obj = BlockPoolUI()\n if not blockpool_ui_obj.delete_pool(instance.name):\n instance.delete()\n raise PoolNotDeletedFromUI(\n f\"Could not delete block pool {instances.name} from UI.\"\n \" Deleted from CLI\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the value of an item. This also discards any deferred callable that has been set for the key.
def __delitem__(self, key):
    # Handle the KeyError case first
    if key not in self._keys:
        raise KeyError(key)

    # Pop it off
    self._available.pop(key, None)
    self._deferred.pop(key, None)
    self._keys.discard(key)
[ "def remove_item(self, key, value):\n ...", "def delete(self, key, item): # noqa\n return self.execute_command(CF_DEL, key, item)", "def _delitem(\n self,\n key: K,\n ) -> None:\n if test_mode:\n assert (\n self._lock.locked()\n ), \"The mutex self._lock should be locked by this thread\"\n del self._cache[key]\n self._add_or_remove()", "def __delitem__(self, key):\n query = self.store.delete().where(self.store.c.key == key)\n result = self.conn.execute(query)\n if result.rowcount == 0:\n raise KeyError", "def __delitem__(self, key):\n hash_val = self._hash(key)\n if self.table[hash_val] != self.defVal and (isinstance(self.table[hash_val], tuple) and\n self.table[hash_val][0] == key and\n self.table[hash_val][2] == True):\n self.table[hash_val] = (self.table[hash_val][0], self.table[hash_val][1], False)\n else:\n key_found = False\n iter_count = 0\n while not key_found:\n if hash_val >= self.capacity:\n hash_val = 0\n if self.table[hash_val] == self.defVal:\n \traise KeyError\n if self.table[hash_val] != self.defVal:\n if self.table[hash_val][0] == key:\n if self.table[hash_val][2] == True:\n self.table[hash_val] = (self.table[hash_val][0], \n self.table[hash_val][1], False)\n key_found = True\n break\n hash_val += 1\n iter_count += 1", "def __delitem__(self, key: Union[slice, int]) -> None:\n length_before = len(self)\n self._bitstore.__delitem__(key)\n if len(self) != length_before:\n self._pos = 0", "def __delitem__(self, key: Union[Any, Sequence[Any]]) -> None:\n self.contents = {\n i: self.contents[i] \n for i in self.contents if i not in more_itertools.always_iterable(key)}\n return self", "def delete(self, key):\n del self.dict[key]", "def remove(self, key):\r\n hash_idx = hash_string(key, self.slots)\r\n num = 1\r\n while self.table[hash_idx] and self.table[hash_idx].key != key:\r\n hash_idx = (hash_idx + num*num) % self.slots\r\n num += 1\r\n if self.table[hash_idx] is None:\r\n raise KeyError\r\n return_val = self.table[hash_idx]\r\n self.table[hash_idx] = self.deleted\r\n self.num_items -= 1\r\n return return_val", "def delete_item(table, pk_name, pk_value):\n response = table.delete_item(Key={pk_name: pk_value})\n\n return response", "def remove(self, key):\n \n value = self._linear_search(key)\n if value != -1:\n value = self._values.pop(value)\n else:\n value = None\n \n \n return value", "def __delitem__(self, pbft_public_key):\n try:\n del self._store_db[pbft_public_key]\n\n # If the key is the active key, then also clear the active key\n if self.active_key == pbft_public_key:\n self.active_key = None\n except KeyError:\n pass", "def remove(obj, key, val=ANY):\n return el.removes(parse(key), obj, val)", "def remove_item(self, query, key=None):\n key = key if key is not None else TodoKey.null\n todokey = self.get_key(key, None)\n if todokey is None:\n return None\n removed = todokey.remove_item(query)\n return removed", "def remove_item(self, query):\n keyresult = self.find_item(query)\n removed = None\n if keyresult:\n removed = self.data.pop(keyresult.index)\n else:\n debug('Falsey key result: {}'.format(keyresult))\n\n return removed", "def delVals(self,db, key, dupdata=True):\n with self.env.begin(db=db, write=True, buffers=True) as txn:\n return (txn.delete(key))", "def remove_task(self, item: T) -> None:\n entry = self.entry_finder.pop(item)\n entry.item = self.REMOVED", "def popitem(self, *pargs, **kargs):\n try:\n result = super(MutationDict, self).popitem(*pargs, **kargs)\n except Exception:\n raise\n else:\n self.changed()\n return result", "def remove_item(self, 
item):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a deferred callable for a key. This may be used when a value for a key should only be generated if the function being called actually wants the value, usually because generating the value is somewhat expensive.
def set_deferred(self, key, func):
    self._deferred[key] = func
[ "def set_lazy(self, key, value_callable):\n if key in self._dic:\n del self._dic[key]\n self._lazyload[key] = value_callable", "def setter(self, fn):\n self.cb_set = fn", "def makeKeyGetter( k ):\n def myFunc( v ):\n return k( v[1] )\n print('making key getter for k=', k)\n return myFunc", "def gen_setter(keyval_dict,key,val):\n\n if not(key in keyval_dict):\n name = '_'+key\n if not(name in keyval_dict):\n raise KeyError(' Property name: {0} is not defined'.format(key))\n else:\n name = key\n\n test_val = keyval_dict[name];\n if isinstance(test_val,ComplexProperty):\n if not isinstance(val,list):\n raise KeyError(' You can not assign non-list value to complex property {0}'.format(key))\n pass\n # Assigning values for composite function to the function components\n test_val.__set__(keyval_dict,val)\n return None\n else:\n keyval_dict[key] = val;\n return None", "def set_gauge_function(self, value_function: callable, key_tag: str = None, value_key: str = None) -> None:\n self.value_function = value_function\n if bool(key_tag) and bool(value_key):\n raise ValueError(\"Please provide either key_tag or value_key, but not both\")\n self.key_tag = key_tag\n self.value_key = value_key\n self._integer_return = not bool(key_tag) and not bool(value_key)", "def set_callback(self, key_name, callback, silent=False):\n if not silent:\n self.sanity_check_cb(key_name, callback)\n self.keymap[key_name] = callback", "def dnskey_set_key(self, f):\n return _ldns._ldns_rr_dnskey_set_key(self, f)\n #parameters: ldns_rr *, ldns_rdf *,\n #retvals: bool", "def __setitem__(self, key, value):\n self.fcmdict[key] = value", "def fattr(key, value):\n\n def wrapper(fn):\n setattr(fn, key, value)\n return fn\n\n return wrapper", "def __setattr__(self, key, val):\n if key.startswith(\"_\"):\n object.__setattr__(self, key, val)\n else:\n self._kwargs[key] = val", "def rest_key_assign(call, value_type):\n def decorator_impl(klass):\n\n def load_json_dict(json_dict, *args, **kwargs):\n inst = klass()\n key_assign_method = getattr(inst, call)\n for json_key, json_blob in json_dict.iteritems():\n value = value_type.load_json(json_blob)\n key_assign_method(json_key, value)\n return inst\n \n setattr(klass, \"load_json_dict\", staticmethod(load_json_dict))\n return klass\n return decorator_impl", "def set_callback(self, data_id, func):\n self.callbacks[data_id] = func", "def _lazy_setdefault(dict, key, default):\n try:\n return dict[key]\n except KeyError:\n value = default()\n dict[key] = value\n return value", "def set_func(self, func):\n self._func = func", "def set_val(self, key, val, extra_data):\n raise NotImplementedError", "def set_generic(self, _key: str, _type, _value):\n set_func = {\n \"bool\" : self.set_bool,\n \"float\" : self.set_float,\n \"int\" : self.set_int,\n \"point\" : self.set_point,\n \"points\": self.set_points,\n \"str\" : self.set_str\n }\n\n # noinspection PyArgumentList\n set_func.get(_type)(_key, _value)", "def usergetter(self, f):\r\n self._usergetter = f\r\n return f", "def SetFunction(self, id, value):\n self.functions[id] = value", "def grantsetter(self, f):\r\n self._grantsetter = f\r\n return f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function decorator that allows dependency injection to be tailored. In most cases, it is not necessary to use this decorator; it may be used when a function takes all keyword arguments (i.e., a ``kwargs`` or similar argument is present) but the developer wishes to restrict the set of injectable arguments.
def inject(required=None, optional=None):
    # The actual decorator; just calls from_func() with appropriate
    # arguments
    def decorator(func):
        WantSignature.from_func(func, required=required, optional=optional)
        return func

    return decorator
[ "def inject_config(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n sig = signature(function)\n\n # for each parameter that wasn't passed as args\n for parameter_name in list(sig.parameters)[len(args):]:\n # and wasn't passed in kwargs\n if kwargs.get(parameter_name, DEFAULT) is DEFAULT:\n # set configured value based on the annotation key\n config_key = sig.parameters[parameter_name].annotation\n if config_key != Signature.empty:\n kwargs[parameter_name] = configured(config_key)\n\n return function(*args, **kwargs)\n\n return wrapper", "def argument(*args, **kwargs):\r\n\r\n def decorator(function):\r\n if isinstance(function, Command):\r\n func = function.function\r\n else:\r\n func = function\r\n\r\n if not hasattr(func, 'djboss_arguments'):\r\n func.djboss_arguments = []\r\n func.djboss_arguments.append((args, kwargs))\r\n\r\n return function\r\n return decorator", "def noninjectable(*args: str) -> Callable[[CallableT], CallableT]:\n\n def decorator(function: CallableT) -> CallableT:\n argspec = inspect.getfullargspec(inspect.unwrap(function))\n for arg in args:\n if arg not in argspec.args and arg not in argspec.kwonlyargs:\n raise UnknownArgument('Unable to mark unknown argument %s ' 'as non-injectable.' % arg)\n\n existing = getattr(function, '__noninjectables__', set())\n merged = existing | set(args)\n cast(Any, function).__noninjectables__ = merged\n return function\n\n return decorator", "def inject(fun: Callable) -> Callable:\n sig = inspect.signature(fun)\n\n injectables: Dict[str, Any] = {}\n for name, param in sig.parameters.items():\n type_ = param.annotation\n if name == 'self':\n continue\n else:\n injectables[name] = type_\n\n @wraps(fun)\n def _inner(*args, **kwargs):\n container = Container()\n for n, t in injectables.items():\n if n not in kwargs:\n kwargs[n] = container.get_object(t)\n\n return fun(*args, **kwargs)\n\n return _inner", "def with_argspec(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n return f(*args, **kwargs)\n return wrapped", "def keyword_only_args(*included_keywords):\n def decorator(func):\n \"\"\"Decorator factory, assigns arguments as keyword-only and\n calculates sets for error checking.\n\n Args:\n func: The function to decorate.\n\n Returns:\n A function wrapped so that it has keyword-only arguments. 
\n \"\"\"\n signature = _signature(func)\n kw_only_args = set(included_keywords)\n positional_args = set()\n args_with_defaults = set()\n for name, param in signature.parameters.items():\n if param.kind in {param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD}:\n positional_args.add(name)\n if not included_keywords and param.kind is param.POSITIONAL_OR_KEYWORD and param.default is not param.empty:\n kw_only_args.add(name)\n if param.default is not param.empty:\n args_with_defaults.add(name)\n\n @functools.wraps(func)\n def wrapper(*args, **kws):\n \"\"\"The decorator itself, checks arguments with set operations, moves\n args from *args into **kws, and then calls func().\n\n Args:\n *args, **kws: The arguments passed to the original function.\n\n Returns:\n The original function's result when it's called with the\n modified arguments.\n\n Raises:\n TypeError: When there is a mismatch between the supplied\n and expected arguments.\n\n \"\"\"\n keys = collections.KeysView(kws)\n # Are all the keyword-only args covered either by a passed\n # argument or a default?\n if not kw_only_args <= keys | args_with_defaults:\n wrong_args(func, signature, kw_only_args - (keys | args_with_defaults), 'keyword-only')\n # Are there enough positional args to cover all the\n # arguments not covered by a passed argument or a default?\n if len(args) < len(positional_args - (keys | args_with_defaults)):\n wrong_args(func, signature, positional_args - (keys | args_with_defaults), 'positional', len(args))\n\n args = list(args)\n for index, (name, param) in enumerate(signature.parameters.items()):\n if param.kind is param.VAR_POSITIONAL or param.kind is param.VAR_KEYWORD:\n break\n if name in kw_only_args or name in keys & positional_args:\n args.insert(index, kws.pop(name, param.default))\n func(*args, **kws)\n return wrapper\n\n def wrong_args(func, signature, missing_args, arg_type, number_of_args=0):\n \"\"\" Raise Python 3-style TypeErrors for missing arguments.\"\"\"\n ordered_args = [a for a in signature.parameters if a in missing_args]\n ordered_args = ordered_args[number_of_args:]\n error_message = ['%s() missing %d required %s argument' % (func.__name__, len(ordered_args), arg_type)]\n if len(ordered_args) == 1:\n error_message.append(\": '%s'\" % ordered_args[0])\n else:\n error_message.extend(['s: ', ' '.join(\"'%s'\" % a for a in ordered_args[:-1]), \" and '%s'\" % ordered_args[-1]])\n raise TypeError(''.join(error_message))\n\n return decorator", "def create_arguments(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return functools.partial(func, *args, **kwargs)\n return wrapper", "def inject(**named_dependencies: _Name) -> Callable[[_InitMethod], _InitMethod]:\n def handle(init_method: _InitMethod) -> _InitMethod:\n err = _check_dependencies(init_method, named_dependencies)\n if err is not None:\n raise BadConstructorError(\"Constructor \\\"%s\\\" %s\" % (init_method, err))\n\n # noinspection PyCallingNonCallable\n setattr(init_method, _PYPROVIDE_PROPERTIES_ATTR,\n _InjectDecoratorProperties(named_dependencies))\n return init_method\n return handle", "def kwargs_decorator(deco):\n return update_wrapper(curry(deco), deco)", "def particle_input(\n wrapped_function: Callable = None,\n require: Union[str, Set, List, Tuple] = None,\n any_of: Union[str, Set, List, Tuple] = None,\n exclude: Union[str, Set, List, Tuple] = None,\n none_shall_pass: bool = False,\n) -> Any:\n\n if exclude is None:\n exclude = set()\n if any_of is None:\n any_of = set()\n if require is None:\n require = set()\n\n 
def decorator(wrapped_function: Callable):\n wrapped_signature = inspect.signature(wrapped_function)\n\n # add '__signature__' to methods that are copied from\n # wrapped_function onto wrapper\n assigned = list(functools.WRAPPER_ASSIGNMENTS)\n assigned.append(\"__signature__\")\n\n @functools.wraps(wrapped_function, assigned=assigned)\n def wrapper(*args, **kwargs):\n annotations = wrapped_function.__annotations__\n bound_args = wrapped_signature.bind(*args, **kwargs)\n\n default_arguments = bound_args.signature.parameters\n arguments = bound_args.arguments\n argnames = bound_args.signature.parameters.keys()\n\n # Handle optional-only arguments in function declaration\n for default_arg in default_arguments:\n # The argument is not contained in `arguments` if the\n # user does not explicitly pass an optional argument.\n # In such cases, manually add it to `arguments` with\n # the default value of parameter.\n if default_arg not in arguments:\n arguments[default_arg] = default_arguments[default_arg].default\n\n funcname = wrapped_function.__name__\n\n args_to_become_particles = []\n for argname in annotations.keys():\n if isinstance(annotations[argname], tuple):\n if argname == \"return\":\n continue\n annotated_argnames = annotations[argname]\n expected_params = len(annotated_argnames)\n received_params = len(arguments[argname])\n if expected_params != received_params:\n raise ValueError(\n f\"Number of parameters allowed in the tuple \"\n f\"({expected_params} parameters) are \"\n f\"not equal to number of parameters passed in \"\n f\"the tuple ({received_params} parameters).\"\n )\n elif isinstance(annotations[argname], list):\n annotated_argnames = annotations[argname]\n expected_params = len(annotated_argnames)\n if expected_params > 1:\n raise TypeError(\n \"Put in [Particle] as the annotation to \"\n \"accept arbitrary number of Particle arguments.\"\n )\n else:\n annotated_argnames = (annotations[argname],)\n\n for annotated_argname in annotated_argnames:\n is_particle = (\n annotated_argname is Particle\n or annotated_argname is Optional[Particle]\n )\n if is_particle and argname != \"return\":\n args_to_become_particles.append(argname)\n\n if not args_to_become_particles:\n raise ParticleError(\n f\"None of the arguments or keywords to {funcname} \"\n f\"have been annotated with Particle, as required \"\n f\"by the @particle_input decorator.\"\n )\n elif len(args_to_become_particles) > 1:\n if \"Z\" in argnames or \"mass_numb\" in argnames:\n raise ParticleError(\n f\"The arguments Z and mass_numb in {funcname} are not \"\n f\"allowed when more than one argument or keyword is \"\n f\"annotated with Particle in functions decorated \"\n f\"with @particle_input.\"\n )\n\n for x in args_to_become_particles:\n if (\n annotations[x] is Particle\n and isinstance(arguments[x], (tuple, list))\n and len(arguments[x]) > 1\n ):\n raise TypeError(\n f\"You cannot pass a tuple or list containing \"\n f\"Particles when only single Particle was \"\n f\"expected, instead found {arguments[x]}. If you \"\n f\"intend to pass more than 1 Particle instance, \"\n f\"use a tuple or a list type. \"\n f\"That is use (Particle, Particle, ...) 
or \"\n f\"[Particle] in function declaration.\"\n )\n\n # If the number of arguments and keywords annotated with\n # Particle is exactly one, then the Z and mass_numb keywords\n # can be used without potential for ambiguity.\n\n Z = arguments.get(\"Z\", None)\n mass_numb = arguments.get(\"mass_numb\", None)\n\n # Go through the argument names and check whether or not they are\n # annotated with Particle. If they aren't, include the name and\n # value of the argument as an item in the new keyword arguments\n # dictionary unchanged. If they are annotated with Particle, then\n # either convert the representation of a Particle to a Particle if\n # it is not already a Particle and then do error checks.\n\n new_kwargs = {}\n\n for argname in argnames:\n raw_argval = arguments[argname]\n if isinstance(raw_argval, (tuple, list)):\n # Input argument value is a tuple or list\n # of corresponding particles or atomic values.\n argval_tuple = raw_argval\n particles = []\n else:\n # Otherwise convert it to tuple anyway so it can work\n # with loops too.\n argval_tuple = (raw_argval,)\n\n for pos, argval in enumerate(argval_tuple):\n should_be_particle = argname in args_to_become_particles\n # If the argument is not annotated with Particle, then we just\n # pass it through to the new keywords without doing anything.\n\n if not should_be_particle:\n new_kwargs[argname] = raw_argval\n continue\n\n # Occasionally there will be functions where it will be\n # useful to allow None as an argument.\n\n # In case annotations[argname] is a collection (which looks\n # like (Particle, Optional[Particle], ...) or [Particle])\n if isinstance(annotations[argname], tuple):\n optional_particle = (\n annotations[argname][pos] is Optional[Particle]\n )\n elif isinstance(annotations[argname], list):\n optional_particle = annotations[argname] == [Optional[Particle]]\n else:\n # Otherwise annotations[argname] must be a Particle itself\n optional_particle = annotations[argname] is Optional[Particle]\n\n if (optional_particle or none_shall_pass) and argval is None:\n particle = None\n else:\n params = (argval, Z, mass_numb)\n already_particle = isinstance(argval, Particle)\n\n particle = get_particle(\n argname, params, already_particle, funcname\n )\n\n if isinstance(raw_argval, (tuple, list)):\n # If passed argument is a tuple or list, keep\n # appending them.\n particles.append(particle)\n # Set appended values if current iteration is the\n # last iteration.\n if (pos + 1) == len(argval_tuple):\n new_kwargs[argname] = tuple(particles)\n del particles\n else:\n # Otherwise directly set values\n new_kwargs[argname] = particle\n\n return wrapped_function(**new_kwargs)\n\n # add '__signature__' if it does not exist\n # - this will preserve parameter hints in IDE's\n if not hasattr(wrapper, \"__signature__\"):\n wrapper.__signature__ = inspect.signature(wrapped_function)\n\n return wrapper\n\n def get_particle(argname, params, already_particle, funcname):\n argval, Z, mass_numb = params\n \"\"\"\n Convert the argument to a\n `~plasmapy.particles.particle_class.Particle` object if it is\n not already one.\n \"\"\"\n\n if not already_particle:\n\n if not isinstance(argval, (numbers.Integral, str, tuple, list)):\n raise TypeError(\n f\"The argument {argname} to {funcname} must be \"\n f\"a string, an integer or a tuple or list of them \"\n f\"corresponding to an atomic number, or a \"\n f\"Particle object.\"\n )\n\n try:\n particle = Particle(argval, Z=Z, mass_numb=mass_numb)\n except InvalidParticleError as e:\n raise 
InvalidParticleError(\n _particle_errmsg(argname, argval, Z, mass_numb, funcname)\n ) from e\n\n # We will need to do the same error checks whether or not the\n # argument is already an instance of the Particle class.\n\n if already_particle:\n particle = argval\n\n # If the name of the argument annotated with Particle in the\n # decorated function is element, isotope, or ion; then this\n # decorator should raise the appropriate exception when the\n # particle ends up not being an element, isotope, or ion.\n\n cat_table = [\n (\"element\", particle.element, InvalidElementError),\n (\"isotope\", particle.isotope, InvalidIsotopeError),\n (\"ion\", particle.ionic_symbol, InvalidIonError),\n ]\n\n for category_name, category_symbol, CategoryError in cat_table:\n if argname == category_name and not category_symbol:\n raise CategoryError(\n f\"The argument {argname} = {repr(argval)} to \"\n f\"{funcname} does not correspond to a valid \"\n f\"{argname}.\"\n )\n\n # Some functions require that particles be charged, or\n # at least that particles have charge information.\n\n _charge_number = particle._attributes[\"charge number\"]\n\n must_be_charged = \"charged\" in require\n must_have_charge_info = set(any_of) == {\"charged\", \"uncharged\"}\n\n uncharged = _charge_number == 0\n lacks_charge_info = _charge_number is None\n\n if must_be_charged and (uncharged or must_have_charge_info):\n raise ChargeError(f\"A charged particle is required for {funcname}.\")\n\n if must_have_charge_info and lacks_charge_info:\n raise ChargeError(f\"Charge information is required for {funcname}.\")\n\n # Some functions require particles that belong to more complex\n # classification schemes. Again, be sure to provide a\n # maximally useful error message.\n\n if not particle.is_category(require=require, exclude=exclude, any_of=any_of):\n raise ParticleError(\n _category_errmsg(particle, require, exclude, any_of, funcname)\n )\n\n return particle\n\n # The following code allows the decorator to be used either with or\n # without arguments. 
This allows us to invoke the decorator either\n # as `@particle_input` or as `@particle_input()`, where the latter\n # call allows the decorator to have keyword arguments.\n\n if wrapped_function is not None:\n return decorator(wrapped_function)\n else:\n return decorator", "def opts(arg_name: str, **options):\n\n def decorator(func):\n _quick_set(func, 'opts', arg_name, options, {})\n return func\n\n return decorator", "def _set_args(func, *args, **kwargs):\n\n def wrapped():\n return func(*args, **kwargs)\n\n wrapped.args = args\n wrapped.kwargs = kwargs\n wrapped.__name__ = func.__name__\n\n return wrapped", "def provider(func=None, *, singleton=False, injector=None):\n\n def decorator(func):\n wrapped = _wrap_provider_func(func, {\"singleton\": singleton})\n if injector:\n injector.register_provider(wrapped)\n return wrapped\n\n if func:\n return decorator(func)\n return decorator", "def inject(*args, **kwargs):\n\n def wrapper(obj):\n if inspect.isclass(obj) or callable(obj):\n _inject_object(obj, *args, **kwargs)\n return obj\n raise DiayException(\"Don't know how to inject into %r\" % obj)\n\n return wrapper", "def inject_closure_values(func, **kwargs):\n wrapped_by = None\n\n if isinstance(func, property):\n fget, fset, fdel = func.fget, func.fset, func.fdel\n if fget: fget = fix_func(fget, **kwargs)\n if fset: fset = fix_func(fset, **kwargs)\n if fdel: fdel = fix_func(fdel, **kwargs)\n wrapped_by = type(func)\n return wrapped_by(fget, fset, fdel)\n\n elif isinstance(func, (staticmethod, classmethod)):\n func = func.__func__\n wrapped_by = type(func)\n\n newfunc = _inject_closure_values(func, **kwargs)\n\n if wrapped_by:\n newfunc = wrapped_by(newfunc)\n return newfunc", "def inject_timeout(func):\n\n @six.wraps(func)\n def decorator(self, *args, **kwargs):\n kwargs.setdefault(\"timeout\", self._timeout)\n return func(self, *args, **kwargs)\n\n return decorator", "def kwargsfunc(**kwargs):", "def required_packages(packages):\n def decorator_without_arguments(func):\n # validate python package dependencies, should be a list of strings\n if not isinstance(packages, list) or \\\n any([not isinstance(dependency, str) for dependency in packages]):\n raise TypeError((\"python package dependencies has to be a list of\"\n \"strings like: ['mysql==1.0.0', 'another==0.2.3']\"))\n func.func_dict['required_packages'] = packages\n return func\n return decorator_without_arguments", "def is_decorated_with_inject(function: Callable[..., Any]) -> bool:\n return hasattr(function, '__bindings__')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function decorator for wrapping decorators. This works just like ``six.wraps()`` (which in turn works just like ``functools.wraps()``), but additionally manages dependency injection metadata, allowing decorators to request data independent of the function they wrap.
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
          updated=functools.WRAPPER_UPDATES, provides=None,
          required=_unset, optional=_unset):
    # The actual decorator
    def decorator(func):
        # Generate the signature first
        sig = WantSignature.from_func(
            func, wrapped, provides,
            [] if required is _unset else required,
            [] if optional is _unset else optional,
        )

        # Next, wrap it
        func = six.wraps(wrapped, assigned, updated)(func)

        # The wrapper may override the signature, so reset it
        func._micropath_signature = sig

        return func

    return decorator
[ "def decorator_with_args(decorator_to_enhance):\n\n # We use the same trick we did to pass arguments\n def decorator_maker(*args, **kwargs):\n\n # We create on the fly a decorator that accepts only a function\n # but keeps the passed arguments from the maker.\n def decorator_wrapper(func):\n\n # We return the result of the original decorator, which, after all,\n # IS JUST AN ORDINARY FUNCTION (which returns a function).\n # Only pitfall: the decorator must have this specific signature or it won't work:\n return decorator_to_enhance(func, *args, **kwargs)\n\n return decorator_wrapper\n\n return decorator_maker", "def friendly_decorator(f):\n\n @functools.wraps(f)\n def decorator(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # actual decorated function\n return f(args[0])\n else:\n # decorator arguments\n return lambda realf: f(realf, *args, **kwargs)\n\n return decorator", "def inject(required=None, optional=None):\n\n # The actual decorator; just calls from_func() with appropriate\n # arguments\n def decorator(func):\n WantSignature.from_func(func, required=required, optional=optional)\n return func\n\n return decorator", "def with_argspec(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n return f(*args, **kwargs)\n return wrapped", "def with_decorators(self, fn):\r\n return apply_decorators(fn, self.decorators)", "def passthrough_decorator(f):\n return f", "def decorator(caller, _func=None):\n if _func is not None: # return a decorated function\n # this is obsolete behavior; you should use decorate instead\n return decorate(_func, caller)\n # else return a decorator function\n defaultargs, defaults = '', ()\n if inspect.isclass(caller):\n name = caller.__name__.lower()\n doc = 'decorator(%s) converts functions/generators into ' \\\n 'factories of %s objects' % (caller.__name__, caller.__name__)\n elif inspect.isfunction(caller):\n if caller.__name__ == '<lambda>':\n name = '_lambda_'\n else:\n name = caller.__name__\n doc = caller.__doc__\n nargs = caller.__code__.co_argcount\n ndefs = len(caller.__defaults__ or ())\n defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])\n if defaultargs:\n defaultargs += ','\n defaults = caller.__defaults__\n else: # assume caller is an object with a __call__ method\n name = caller.__class__.__name__.lower()\n doc = caller.__call__.__doc__\n evaldict = dict(_call=caller, _decorate_=decorate)\n dec = FunctionMaker.create(\n '%s(func, %s)' % (name, defaultargs),\n 'if func is None: return lambda func: _decorate_(func, _call, (%s))\\n'\n 'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),\n evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)\n if defaults:\n dec.__defaults__ = (None,) + defaults\n return dec", "def call_wrapped(func, args, kwargs):\n\n # Get the function's injection signature\n sig = WantSignature.from_func(func)\n\n # Call the function\n return sig(args, kwargs)", "def wraps(app, **kw):\n def wrap(func):\n return maybe_rewrap(app, kw and lite(**kw)(func) or lite(func))\n return wrap", "def decorate(self, func):\n if not callable(func):\n raise TypeError('Cannot decorate a non callable object \"{}\"'\n .format(func))\n self.decorated = func", "def Wrap( self, fn, wrapFn ):\n def Wrapped( *args ):\n return wrapFn( *fn( *args ) )\n return Wrapped", "def decorate(func, caller, extras=()):\n evaldict = dict(_call_=caller, _func_=func)\n es = ''\n for i, extra in enumerate(extras):\n ex = '_e%d_' % i\n evaldict[ex] = extra\n es += ex + ', '\n\n if '3.5' <= 
sys.version < '3.6':\n # with Python 3.5 isgeneratorfunction returns True for all coroutines\n # however we know that it is NOT possible to have a generator\n # coroutine in python 3.5: PEP525 was not there yet\n generatorcaller = isgeneratorfunction(\n caller) and not iscoroutinefunction(caller)\n else:\n generatorcaller = isgeneratorfunction(caller)\n if generatorcaller:\n fun = FunctionMaker.create(\n func, \"for res in _call_(_func_, %s%%(shortsignature)s):\\n\"\n \" yield res\" % es, evaldict, __wrapped__=func)\n else:\n fun = FunctionMaker.create(\n func, \"return _call_(_func_, %s%%(shortsignature)s)\" % es,\n evaldict, __wrapped__=func)\n if hasattr(func, '__qualname__'):\n fun.__qualname__ = func.__qualname__\n return fun", "def provider(func=None, *, singleton=False, injector=None):\n\n def decorator(func):\n wrapped = _wrap_provider_func(func, {\"singleton\": singleton})\n if injector:\n injector.register_provider(wrapped)\n return wrapped\n\n if func:\n return decorator(func)\n return decorator", "def _wrap(function):\n if hasattr(function, \"simplestatic\"):\n return function\n simplestatic = {}\n @wraps(function)\n def wrapper(*args, **kwargs):\n # Remove the number of arguments specified by offset before doing\n # anything else\n offset = simplestatic.get(\"offset\", 0)\n initial, args = args[:offset], args[offset:]\n # Get the parameter list\n params = simplestatic.get(\"params\", [])\n # Make sure we've got enough parameters. If required isn't specified,\n # we're not going to require any parameters. In the future, we might\n # want to introspect the function's default arguments to see how many\n # are required; the only problem with this is that if other decorators\n # are used on the function, we won't be able to get a proper idea of\n # what arguments are required.\n required = simplestatic.get(\"required\", 0)\n if len(args) < required:\n # Not enough arguments were specified\n raise TypeError(\"%r requires %s arguments but only got %s\" %\n (function, required + offset, len(args) + len(initial)))\n # We've got enough arguments. Now pair up the parameters and the\n # arguments and make sure we've got the right types.\n for index, (param, param_type) in enumerate(zip(args, params)):\n if not isinstance(param, param_type):\n # Wrong type; raise an exception indicating so. TODO: Might\n # want to create a subclass of SemanticException and TypeError\n # specific to this error.\n raise TypeError(\"Argument %s to %r was supposed to be of type \"\n \"%r but the value %r was received instead\" % \n (index + len(initial), function, param_type, param))\n # All positional arguments (that were specified) have been checked for\n # correct type. Now check to see if remainder was specified, and check\n # the remaining arguments if it was.\n if \"remainder\" in simplestatic:\n # Remainder was specified, so check any additional arguments above\n # and beyond those mentioned in params to make sure they're of the\n # requested type.\n for index, a in enumerate(args[len(params):], len(initial) + len(params)):\n if not isinstance(a, simplestatic[\"remainder\"]):\n # Wrong type; raise an exception indicating so.\n raise TypeError(\"Variadic argument %s to %r was supposed \"\n \"to be of type %r but the value %r was received \"\n \"instead\" %\n (index, function, simplestatic[\"remainder\"], a))\n # There aren't any additional arguments or they all passed type\n # checking. 
We should be good to run the function now.\n result = function(*(initial + args), **kwargs)\n # Now we check to see if we're supposed to typecheck the result.\n if \"returns\" in simplestatic:\n # We're supposed to typecheck the result. First we'll see if the\n # result is supposed to be None but isn't.\n if simplestatic[\"returns\"] is None and result is not None:\n # Result is supposed to be None but it isn't. Throw an\n # appropriate exception.\n raise TypeError(\"Result of function %r was supposed to be \"\n \"None but was %r instead\" %\n (function, result))\n # Now we check to see if the result is supposed to be of a\n # particular type but isn't.\n elif not isinstance(result, simplestatic[\"returns\"]):\n # Result wasn't of the correct type\n raise TypeError(\"Result of function %r was supposed to be of \"\n \"type %r but the value %r was returned instead\" %\n (function, simplestatic[\"returns\"], result))\n # Function passed type validation, so return it.\n return result\n wrapper.simplestatic = simplestatic\n return wrapper", "def metadata(func):\n # bit of a hack to get class variables\n class_attrs = sys._getframe(1).f_locals\n suff = class_attrs.get('extractor_suffix')\n exs = class_attrs.get('metadata_extractors')\n\n # check name\n name = func.__name__\n if not name.endswith(suff):\n raise NameError(name + ' does not end with \"' + suff + '\"')\n\n # update list of extractors\n exs.append(name)\n\n # wrap to store return value\n @wraps(func)\n def wrapper(self, *args):\n val = func(self, *args)\n if val is not None:\n self.rt_dict[name[0:-len(suff)]] = val\n\n return wrapper", "def decorator(function):\n try:\n self.__save_endpoint(\n symmetric.endpoints.Endpoint(\n route, methods, response_code, function, auth_token\n )\n )\n except symmetric.errors.DuplicatedRouteError as err:\n self.__app.logger.error(\n f\"[[symmetric]] DuplicatedRouteError: {err}\"\n )\n sys.exit(1)\n\n # Decorate the wrapper\n @self.__app.route(\n route, methods=methods, endpoint=function.__name__\n )\n def wrapper(*args, **kwargs):\n \"\"\"\n Function wrapper. The main function gets logged, the JSON body\n gets extracted from the request and gets unpacked as **kwargs\n to pass to the main function. Some precautions are also taken\n (namely a try/except combo). 
Returns the function's output\n jsonified with a response code.\n \"\"\"\n try:\n self.__log_request(flask.request, route, function)\n\n # Get the body\n body = flask.request.get_json()\n if not body:\n body = {}\n\n # Check for token authentication\n symmetric.helpers.authenticate(\n body, auth_token, self.__client_token_name,\n self.__server_token_name)\n\n # Filter method parameters\n parameters = symmetric.helpers.filter_params(\n function, body, auth_token, self.__client_token_name)\n return flask.jsonify(function(**parameters)), response_code\n except symmetric.errors.AuthenticationRequiredError as err:\n # Error authenticating\n self.__app.logger.error(\n f\"[[symmetric]] exception caught: {err}\"\n )\n return flask.jsonify({}), 401\n except Exception as err:\n self.__app.logger.error(\n f\"[[symmetric]] exception caught: {err}\"\n )\n return flask.jsonify({}), 500\n return wrapper", "def __init__(self, func=None, decorators=None, **params):\n params[\"decorators\"] = decorators\n super(ChainedDecorator, self).__init__(func, **params)", "def kwargs_decorator(deco):\n return update_wrapper(curry(deco), deco)", "def inject(fun: Callable) -> Callable:\n sig = inspect.signature(fun)\n\n injectables: Dict[str, Any] = {}\n for name, param in sig.parameters.items():\n type_ = param.annotation\n if name == 'self':\n continue\n else:\n injectables[name] = type_\n\n @wraps(fun)\n def _inner(*args, **kwargs):\n container = Container()\n for n, t in injectables.items():\n if n not in kwargs:\n kwargs[n] = container.get_object(t)\n\n return fun(*args, **kwargs)\n\n return _inner" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call a wrapped function with appropriate dependency injection. This is for use by decorators that wrap a function that will be called via dependency injection, and ensures that the function is called only with the desired keyword arguments.
def call_wrapped(func, args, kwargs):
    # Get the function's injection signature
    sig = WantSignature.from_func(func)

    # Call the function
    return sig(args, kwargs)
[ "def inject(required=None, optional=None):\n\n # The actual decorator; just calls from_func() with appropriate\n # arguments\n def decorator(func):\n WantSignature.from_func(func, required=required, optional=optional)\n return func\n\n return decorator", "def with_argspec(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n return f(*args, **kwargs)\n return wrapped", "def inject_config(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n sig = signature(function)\n\n # for each parameter that wasn't passed as args\n for parameter_name in list(sig.parameters)[len(args):]:\n # and wasn't passed in kwargs\n if kwargs.get(parameter_name, DEFAULT) is DEFAULT:\n # set configured value based on the annotation key\n config_key = sig.parameters[parameter_name].annotation\n if config_key != Signature.empty:\n kwargs[parameter_name] = configured(config_key)\n\n return function(*args, **kwargs)\n\n return wrapper", "def _set_args(func, *args, **kwargs):\n\n def wrapped():\n return func(*args, **kwargs)\n\n wrapped.args = args\n wrapped.kwargs = kwargs\n wrapped.__name__ = func.__name__\n\n return wrapped", "def dummy_wrap(self, *args, **kwargs):\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)", "def call(self, func, *args, **kwargs):\n guessed_kwargs = self._guess_kwargs(func)\n for key, val in guessed_kwargs.items():\n kwargs.setdefault(key, val)\n try:\n return func(*args, **kwargs)\n except TypeError as exc:\n msg = (\n \"tried calling function %r but failed, probably \"\n \"because it takes arguments that cannot be resolved\"\n ) % func\n raise DiayException(msg) from exc", "def keyword_only_args(*included_keywords):\n def decorator(func):\n \"\"\"Decorator factory, assigns arguments as keyword-only and\n calculates sets for error checking.\n\n Args:\n func: The function to decorate.\n\n Returns:\n A function wrapped so that it has keyword-only arguments. 
\n \"\"\"\n signature = _signature(func)\n kw_only_args = set(included_keywords)\n positional_args = set()\n args_with_defaults = set()\n for name, param in signature.parameters.items():\n if param.kind in {param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD}:\n positional_args.add(name)\n if not included_keywords and param.kind is param.POSITIONAL_OR_KEYWORD and param.default is not param.empty:\n kw_only_args.add(name)\n if param.default is not param.empty:\n args_with_defaults.add(name)\n\n @functools.wraps(func)\n def wrapper(*args, **kws):\n \"\"\"The decorator itself, checks arguments with set operations, moves\n args from *args into **kws, and then calls func().\n\n Args:\n *args, **kws: The arguments passed to the original function.\n\n Returns:\n The original function's result when it's called with the\n modified arguments.\n\n Raises:\n TypeError: When there is a mismatch between the supplied\n and expected arguments.\n\n \"\"\"\n keys = collections.KeysView(kws)\n # Are all the keyword-only args covered either by a passed\n # argument or a default?\n if not kw_only_args <= keys | args_with_defaults:\n wrong_args(func, signature, kw_only_args - (keys | args_with_defaults), 'keyword-only')\n # Are there enough positional args to cover all the\n # arguments not covered by a passed argument or a default?\n if len(args) < len(positional_args - (keys | args_with_defaults)):\n wrong_args(func, signature, positional_args - (keys | args_with_defaults), 'positional', len(args))\n\n args = list(args)\n for index, (name, param) in enumerate(signature.parameters.items()):\n if param.kind is param.VAR_POSITIONAL or param.kind is param.VAR_KEYWORD:\n break\n if name in kw_only_args or name in keys & positional_args:\n args.insert(index, kws.pop(name, param.default))\n func(*args, **kws)\n return wrapper\n\n def wrong_args(func, signature, missing_args, arg_type, number_of_args=0):\n \"\"\" Raise Python 3-style TypeErrors for missing arguments.\"\"\"\n ordered_args = [a for a in signature.parameters if a in missing_args]\n ordered_args = ordered_args[number_of_args:]\n error_message = ['%s() missing %d required %s argument' % (func.__name__, len(ordered_args), arg_type)]\n if len(ordered_args) == 1:\n error_message.append(\": '%s'\" % ordered_args[0])\n else:\n error_message.extend(['s: ', ' '.join(\"'%s'\" % a for a in ordered_args[:-1]), \" and '%s'\" % ordered_args[-1]])\n raise TypeError(''.join(error_message))\n\n return decorator", "def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES, provides=None,\n required=_unset, optional=_unset):\n\n # The actual decorator\n def decorator(func):\n # Generate the signature first\n sig = WantSignature.from_func(\n func, wrapped, provides,\n [] if required is _unset else required,\n [] if optional is _unset else optional,\n )\n\n # Next, wrap it\n func = six.wraps(wrapped, assigned, updated)(func)\n\n # The wrapper may override the signature, so reset it\n func._micropath_signature = sig\n\n return func\n\n return decorator", "def wrapper(self, function, *keywords, **arguments):\n\n if self.opt.test:\n sys.stderr.write(\"TEST: %s(\" % function.__name__)\n for i in keywords:\n sys.stderr.write(\"%s, \" % (i, ))\n for k, msg in list(arguments.items()):\n sys.stderr.write(\"%s = %s, \" % (k, msg))\n sys.stderr.write(\")\\n\")\n else:\n return function(*keywords, **arguments)", "def __call__(self, *args, **kwargs):\n func = self.decorated\n if func is None:\n func = args[0]\n if args[1:] or kwargs:\n raise 
ValueError('Cannot decorate and setup simultaneously '\n 'with __call__(). Use __init__() or '\n 'setup() for setup. Use __call__() or '\n 'decorate() to decorate.')\n self.decorate(func)\n return self\n else:\n return self.run(func, *args, **kwargs)", "def inject(fun: Callable) -> Callable:\n sig = inspect.signature(fun)\n\n injectables: Dict[str, Any] = {}\n for name, param in sig.parameters.items():\n type_ = param.annotation\n if name == 'self':\n continue\n else:\n injectables[name] = type_\n\n @wraps(fun)\n def _inner(*args, **kwargs):\n container = Container()\n for n, t in injectables.items():\n if n not in kwargs:\n kwargs[n] = container.get_object(t)\n\n return fun(*args, **kwargs)\n\n return _inner", "def wrapper(func):\n\tdef inner_func(*args, **kwargs):\n\t\t\"\"\"\n\t\tA real inner function to run parammter function.\n\t\t:param args: default args\n\t\t:param kwargs: default more args\n\t\t:return: None\n\t\t\"\"\"\n\t\tprint \"Entering function \"\n\t\tfunc(*args, **kwargs)\n\t\tprint \"Exiting function\"\n\n\treturn inner_func", "def __call__(self, function=None, hookwrapper=False, optionalhook=False,\n tryfirst=False, trylast=False):\n def setattr_hookimpl_opts(func):\n setattr(func, self.project_name + \"_impl\",\n dict(hookwrapper=hookwrapper, optionalhook=optionalhook,\n tryfirst=tryfirst, trylast=trylast))\n return func\n\n if function is None:\n return setattr_hookimpl_opts\n else:\n return setattr_hookimpl_opts(function)", "def maybe_add_training_arg(\n original_call, wrapped_call, expects_training_arg, default_training_value):\n if not expects_training_arg:\n return wrapped_call, None\n def wrap_with_training_arg(*args, **kwargs):\n \"\"\"Wrap the `wrapped_call` function, and set training argument.\"\"\"\n training_arg_index = get_training_arg_index(original_call)\n training = get_training_arg(training_arg_index, args, kwargs)\n if training is None:\n training = default_training_value or K.learning_phase()\n\n args = list(args)\n kwargs = kwargs.copy()\n\n def replace_training_and_call(training):\n set_training_arg(training, training_arg_index, args, kwargs)\n return wrapped_call(*args, **kwargs)\n\n return control_flow_util.smart_cond(\n training, lambda: replace_training_and_call(True),\n lambda: replace_training_and_call(False))\n\n # Create arg spec for decorated function. 
If 'training' is not defined in the\n # args of the original arg spec, then add it to kwonlyargs.\n arg_spec = tf_inspect.getfullargspec(original_call)\n defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []\n\n kwonlyargs = arg_spec.kwonlyargs\n kwonlydefaults = arg_spec.kwonlydefaults or {}\n # Add training arg if it does not exist, or set the default training value.\n if 'training' not in arg_spec.args:\n kwonlyargs.append('training')\n kwonlydefaults['training'] = default_training_value\n else:\n index = arg_spec.args.index('training')\n training_default_index = len(arg_spec.args) - index\n if (arg_spec.defaults and\n len(arg_spec.defaults) >= training_default_index and\n defaults[-training_default_index] is None):\n defaults[-training_default_index] = default_training_value\n\n decorator_argspec = tf_inspect.FullArgSpec(\n args=arg_spec.args,\n varargs=arg_spec.varargs,\n varkw=arg_spec.varkw,\n defaults=defaults,\n kwonlyargs=kwonlyargs,\n kwonlydefaults=kwonlydefaults,\n annotations=arg_spec.annotations)\n return wrap_with_training_arg, decorator_argspec", "def _wrap(function):\n if hasattr(function, \"simplestatic\"):\n return function\n simplestatic = {}\n @wraps(function)\n def wrapper(*args, **kwargs):\n # Remove the number of arguments specified by offset before doing\n # anything else\n offset = simplestatic.get(\"offset\", 0)\n initial, args = args[:offset], args[offset:]\n # Get the parameter list\n params = simplestatic.get(\"params\", [])\n # Make sure we've got enough parameters. If required isn't specified,\n # we're not going to require any parameters. In the future, we might\n # want to introspect the function's default arguments to see how many\n # are required; the only problem with this is that if other decorators\n # are used on the function, we won't be able to get a proper idea of\n # what arguments are required.\n required = simplestatic.get(\"required\", 0)\n if len(args) < required:\n # Not enough arguments were specified\n raise TypeError(\"%r requires %s arguments but only got %s\" %\n (function, required + offset, len(args) + len(initial)))\n # We've got enough arguments. Now pair up the parameters and the\n # arguments and make sure we've got the right types.\n for index, (param, param_type) in enumerate(zip(args, params)):\n if not isinstance(param, param_type):\n # Wrong type; raise an exception indicating so. TODO: Might\n # want to create a subclass of SemanticException and TypeError\n # specific to this error.\n raise TypeError(\"Argument %s to %r was supposed to be of type \"\n \"%r but the value %r was received instead\" % \n (index + len(initial), function, param_type, param))\n # All positional arguments (that were specified) have been checked for\n # correct type. Now check to see if remainder was specified, and check\n # the remaining arguments if it was.\n if \"remainder\" in simplestatic:\n # Remainder was specified, so check any additional arguments above\n # and beyond those mentioned in params to make sure they're of the\n # requested type.\n for index, a in enumerate(args[len(params):], len(initial) + len(params)):\n if not isinstance(a, simplestatic[\"remainder\"]):\n # Wrong type; raise an exception indicating so.\n raise TypeError(\"Variadic argument %s to %r was supposed \"\n \"to be of type %r but the value %r was received \"\n \"instead\" %\n (index, function, simplestatic[\"remainder\"], a))\n # There aren't any additional arguments or they all passed type\n # checking. 
We should be good to run the function now.\n result = function(*(initial + args), **kwargs)\n # Now we check to see if we're supposed to typecheck the result.\n if \"returns\" in simplestatic:\n # We're supposed to typecheck the result. First we'll see if the\n # result is supposed to be None but isn't.\n if simplestatic[\"returns\"] is None and result is not None:\n # Result is supposed to be None but it isn't. Throw an\n # appropriate exception.\n raise TypeError(\"Result of function %r was supposed to be \"\n \"None but was %r instead\" %\n (function, result))\n # Now we check to see if the result is supposed to be of a\n # particular type but isn't.\n elif not isinstance(result, simplestatic[\"returns\"]):\n # Result wasn't of the correct type\n raise TypeError(\"Result of function %r was supposed to be of \"\n \"type %r but the value %r was returned instead\" %\n (function, simplestatic[\"returns\"], result))\n # Function passed type validation, so return it.\n return result\n wrapper.simplestatic = simplestatic\n return wrapper", "def Wrap( self, fn, wrapFn ):\n def Wrapped( *args ):\n return wrapFn( *fn( *args ) )\n return Wrapped", "def create_arguments(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return functools.partial(func, *args, **kwargs)\n return wrapper", "def remote_func(self, **kwargs):\n def wrapper_param(f):\n @functools.wraps(f)\n def wrapper(*f_args, **f_kwargs):\n return self.execute_python(f, f_args, f_kwargs, **kwargs)\n return wrapper\n return wrapper_param", "def call_with_injection(\n self, callable: Callable[..., T], self_: Any = None, args: Any = (), kwargs: Any = {}\n ) -> T:\n\n bindings = get_bindings(callable)\n signature = inspect.signature(callable)\n full_args = args\n if self_ is not None:\n full_args = (self_,) + full_args\n bound_arguments = signature.bind_partial(*full_args)\n\n needed = dict(\n (k, v) for (k, v) in bindings.items() if k not in kwargs and k not in bound_arguments.arguments\n )\n\n dependencies = self.args_to_inject(\n function=callable,\n bindings=needed,\n owner_key=self_.__class__ if self_ is not None else callable.__module__,\n )\n\n dependencies.update(kwargs)\n\n try:\n return callable(*full_args, **dependencies)\n except TypeError as e:\n reraise(e, CallError(self_, callable, args, dependencies, e, self._stack))\n # Needed because of a mypy-related issue (https://github.com/python/mypy/issues/8129).\n assert False, \"unreachable\" # pragma: no cover" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if a function wants a particular keyword argument.
def wants(func, keyword):
    # Get the function's injection signature
    sig = WantSignature.from_func(func)

    # See if it wants the argument
    return keyword in sig
[ "def test_KeywordParameterIsIncluded(self):\n def function_with_kw(**kw):\n return\n #---\n self.local_register.RPCFunction(function_with_kw)\n\n kwargs_var_name = self.server_stub.definitions[self.module]['function_with_kw']['args']['kwargs_var']\n assert kwargs_var_name == 'kw'", "def has_arg(func, argname):\n return argname in getargspec(func)[0]", "def _has_kwarg_or_kwargs(f, kwarg):\n # For gin wrapped functions, we need to consider the wrapped function.\n if hasattr(f, \"__wrapped__\"):\n f = f.__wrapped__\n args, _, kwargs, _ = inspect.getargspec(f)\n if kwarg in args or kwargs is not None:\n return True\n return False", "def hasarg(func, arg):\n return arg in signature(func).parameters", "def validateKwargs(function):\n missing = [p for p in function.passed.keys() for p in function.defined]\n if len(missing) > 0:\n raise ValueError(\n \"The following keyword parameters do not match: %s\" % str(missing))", "def test_KeywordArgsAreIncluded(self):\n def function_with_kwargs(argument1='test1', argument2='test2'):\n return\n #---\n self.local_register.RPCFunction(function_with_kwargs)\n\n defined_args = self.server_stub.definitions[self.module]['function_with_kwargs']['args']['defined']['kw']\n assert defined_args == {'argument1': 'test1', 'argument2': 'test2'}", "def keyword_only_args(*included_keywords):\n def decorator(func):\n \"\"\"Decorator factory, assigns arguments as keyword-only and\n calculates sets for error checking.\n\n Args:\n func: The function to decorate.\n\n Returns:\n A function wrapped so that it has keyword-only arguments. \n \"\"\"\n signature = _signature(func)\n kw_only_args = set(included_keywords)\n positional_args = set()\n args_with_defaults = set()\n for name, param in signature.parameters.items():\n if param.kind in {param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD}:\n positional_args.add(name)\n if not included_keywords and param.kind is param.POSITIONAL_OR_KEYWORD and param.default is not param.empty:\n kw_only_args.add(name)\n if param.default is not param.empty:\n args_with_defaults.add(name)\n\n @functools.wraps(func)\n def wrapper(*args, **kws):\n \"\"\"The decorator itself, checks arguments with set operations, moves\n args from *args into **kws, and then calls func().\n\n Args:\n *args, **kws: The arguments passed to the original function.\n\n Returns:\n The original function's result when it's called with the\n modified arguments.\n\n Raises:\n TypeError: When there is a mismatch between the supplied\n and expected arguments.\n\n \"\"\"\n keys = collections.KeysView(kws)\n # Are all the keyword-only args covered either by a passed\n # argument or a default?\n if not kw_only_args <= keys | args_with_defaults:\n wrong_args(func, signature, kw_only_args - (keys | args_with_defaults), 'keyword-only')\n # Are there enough positional args to cover all the\n # arguments not covered by a passed argument or a default?\n if len(args) < len(positional_args - (keys | args_with_defaults)):\n wrong_args(func, signature, positional_args - (keys | args_with_defaults), 'positional', len(args))\n\n args = list(args)\n for index, (name, param) in enumerate(signature.parameters.items()):\n if param.kind is param.VAR_POSITIONAL or param.kind is param.VAR_KEYWORD:\n break\n if name in kw_only_args or name in keys & positional_args:\n args.insert(index, kws.pop(name, param.default))\n func(*args, **kws)\n return wrapper\n\n def wrong_args(func, signature, missing_args, arg_type, number_of_args=0):\n \"\"\" Raise Python 3-style TypeErrors for missing 
arguments.\"\"\"\n ordered_args = [a for a in signature.parameters if a in missing_args]\n ordered_args = ordered_args[number_of_args:]\n error_message = ['%s() missing %d required %s argument' % (func.__name__, len(ordered_args), arg_type)]\n if len(ordered_args) == 1:\n error_message.append(\": '%s'\" % ordered_args[0])\n else:\n error_message.extend(['s: ', ' '.join(\"'%s'\" % a for a in ordered_args[:-1]), \" and '%s'\" % ordered_args[-1]])\n raise TypeError(''.join(error_message))\n\n return decorator", "def check_required_kwargs(self, **kwargs):\n for kwarg in self._required_kwargs:\n if kwarg not in self.options:\n raise TypeError(\"Missing required keyword argument '%s'\" % kwarg)", "def test_passed_unspecifiedKeyword(self):\n\n def func(a):\n pass\n\n self.assertRaises(TypeError, self.checkPassed, func, 1, z=2)", "def test_sensitive_function_keyword_arguments(self):\n with self.settings(DEBUG=True):\n self.verify_unsafe_response(sensitive_kwargs_function_caller)\n self.verify_unsafe_email(sensitive_kwargs_function_caller)\n\n with self.settings(DEBUG=False):\n self.verify_safe_response(\n sensitive_kwargs_function_caller, check_for_POST_params=False\n )\n self.verify_safe_email(\n sensitive_kwargs_function_caller, check_for_POST_params=False\n )", "def check_valid(sig, args, kwargs):\n num_pos_only, func, keyword_exclude, sigspec = sig\n if len(args) < num_pos_only:\n return False\n if keyword_exclude:\n kwargs = dict(kwargs)\n for item in keyword_exclude:\n kwargs.pop(item, None)\n try:\n func(*args, **kwargs)\n return True\n except TypeError:\n return False", "def check_has_required(self, args):\n for i in e.Environment.REQUIRED_KWARGS:\n if i not in args:\n return False\n return True", "def test_check_kwargs_strict():\n kwargs = dict(poolsize=100, volume_fraction=0.9, augment_dims=1)\n with pytest.raises(\n RuntimeError,\n match=\"Keyword arguments contain unknown keys: {'augment_dims'}\",\n ):\n check_proposal_kwargs(FlowProposal, kwargs, strict=True)", "def arg_filter(arg: str, keyword: str) -> bool:\n arg = arg.strip()\n return (\n arg.startswith(f\"--{keyword}=\")\n or arg.startswith(f\"-{keyword}=\")\n or arg == f\"--{keyword}\"\n or arg == f\"-{keyword}\"\n )", "def returns_kwarg(arg):\n return use_inferrer(lambda f, *argv, **kwargs: kwargs[arg])", "def _valid_keywords_or_raise(kwds, required=(), optional=()):\n keys = set(kwds)\n required = set(required)\n optional = required | set(optional)\n\n unknown = keys - optional\n if unknown:\n raise TypeError(\n \"invalid keyword arguments ({0} not in {{{1}}})\".format(\n \", \".join(sorted(repr(name) for name in unknown)),\n \", \".join(sorted(repr(name) for name in optional)),\n )\n )\n\n missing = required - keys\n if missing:\n raise TypeError(\n \"missing keyword arguments ({})\".format(\n \", \".join(sorted(repr(name) for name in missing))\n )\n )", "def _validate_kwargs(keyword_arguments):\n for argument in keyword_arguments:\n if argument not in OPTIONAL_PROPERTIES:\n raise TypeError(\n \"__init__() got an unexpected keyword argument \"\n \"'{0}'\".format(argument)\n )", "def has_required_arg(self):\n return any(arg.required for arg in self.arguments)", "def checkarg(kwds, arg):\n data = kwds.get(arg, None)\n cond = data and isinstance(data, (newstr, bytes))\n return cond" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the request_type of this RequestForMe.
def request_type(self):
    return self._request_type
[ "def get_request_type(self, ):\n return self._request_type", "def get_type(self) -> str:\n return self.request_type", "def get_request_type(type):\n uo_type = None\n if isinstance(type, (types.IntType, types.LongType)):\n uo_type = int(type)\n elif isinstance(type, UO):\n uo_type = type.uo_type\n return EBConsts.REQUEST_TYPES.get(uo_type, 'PROCESS')", "def get_object_type(self):\n object_type = self.request_data[\"object\"][\"type\"]\n self.logger.debug('get_object_type: %s' % object_type)\n return object_type", "def determine_request_type(self):\n if self.pageid is not '' and self.attachment is not '':\n self.request_type = 1\n elif self.pageid is not '':\n self.request_type = 2\n elif self.pagetitle is not '' and self.space is not '':\n self.request_type = 3\n elif self.tinyurl is not '':\n self.request_type = 4", "def type(self):\n return self.kwargs.get(\"type\", str)", "def get_type(self):\n return self.scope_type", "def _get_request_method(self):\n return self._request_method", "def get_current_scope_type(self):\n assert(len(self.scope_stack) != 0)\n\n return self.scope_stack[-1].get_type()", "def probe_request_type(self) -> Optional[pulumi.Input['HealthProbeRequestType']]:\n return pulumi.get(self, \"probe_request_type\")", "def caller_type(self):\n return self._caller_type", "def current_request_method(self):\n return self.request.method", "def content_type(self):\n return ContentType.objects.get_for_model(\n self.model, for_concrete_model=False\n )", "def get_type(self) -> ModelType:\n pass", "def request_model(self) -> str:\n return pulumi.get(self, \"request_model\")", "def model_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"model_type\")", "def get_requester(self) -> \"MTurkRequester\":\n if self.__requester is None:\n self.__requester = cast(\"MTurkRequester\", super().get_requester())\n return self.__requester", "def query_type(self) -> str:\n return pulumi.get(self, \"query_type\")", "def get_content_type(request: Request) -> str:\n return request.content_type.split(\";\")[0].strip()", "def type(self):\n return self.recipe_settings[\"type\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the request_type of this RequestForMe.
def request_type(self, request_type):
    self._request_type = request_type
[ "def get_request_type(self, ):\n return self._request_type", "def set_type(self, type):\n return _raw_util.raw_message_set_type(self, type)", "def input_type(self, input_type):\n\n self._input_type = input_type", "def set_task_type(self, task_type):\n self._task_type = task_type", "def get_type(self) -> str:\n return self.request_type", "def get_request_type(type):\n uo_type = None\n if isinstance(type, (types.IntType, types.LongType)):\n uo_type = int(type)\n elif isinstance(type, UO):\n uo_type = type.uo_type\n return EBConsts.REQUEST_TYPES.get(uo_type, 'PROCESS')", "def client_type(self, client_type):\n \n self._client_type = client_type", "def __set_content_type(self, content_type):\n self.__content_type = content_type", "def determine_request_type(self):\n if self.pageid is not '' and self.attachment is not '':\n self.request_type = 1\n elif self.pageid is not '':\n self.request_type = 2\n elif self.pagetitle is not '' and self.space is not '':\n self.request_type = 3\n elif self.tinyurl is not '':\n self.request_type = 4", "def set_content_type( self, type ):\n self.headers[ \"content-type\" ] = type", "def set_type(self, ttype):\n self.type = ttype\n self.token.type = ttype", "def set_doc_type(self, type_name):\n self._type = type_name\n return self", "def set_type(self, type):\n return _raw_util.raw_message_sptr_set_type(self, type)", "def experiment_type(self, experiment_type):\n\n self._experiment_type = experiment_type", "def ChangeType(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def type(self, type):\n allowed_values = [\"simple\", \"complex\", \"collection\"]\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\"\n .format(type, allowed_values)\n )\n\n self._type = type", "def set_type(self, rr_type):\n _ldns.ldns_rr_set_type(self, rr_type)\n #parameters: ldns_rr *, ldns_rr_type,\n #retvals:", "def document_type(self, document_type):\n if document_type is None:\n raise ValueError(\"Invalid value for `document_type`, must not be `None`\") # noqa: E501\n\n self._document_type = document_type", "def f_type(self, f_type):\n \n self._f_type = f_type", "def item_type(self, item_type):\n allowed_values = [\"simple\", \"complex\", \"collection\"]\n if item_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `item_type` ({0}), must be one of {1}\"\n .format(item_type, allowed_values)\n )\n\n self._item_type = item_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary containing minimum version for each python package.
def min_python_module_version():
    ## read from file: prog2default.csv
    python_modules = file_list("python_requirements")
    package_min_versions = HCGB_main.file2dictionary(python_modules, ",")

    return(package_min_versions)
[ "def return_min_version_python_package(package):\r\n\tversion_package = min_python_module_version()\r\n\treturn (version_package[package])", "def get_package_versions() -> Dict[str, str]:\n import pkg_resources\n\n package_dict = pkg_resources.working_set.by_key # type: ignore\n package_version_dict = {key: val.version for key, val in package_dict.items()}\n return package_version_dict", "def get_installed_packages():\n installed_packages = {}\n for pkg_name in pypackagelist:\n try:\n import importlib\n i = importlib.import_module(pkg_name)\n if pkg_name is 'streamsx':\n import streamsx.topology.context\n print(pkg_name+' - ' + i.topology.context.__version__)\n installed_packages[pkg_name] = i.topology.context.__version__\n elif 'streamsx.standard' in pkg_name:\n import streamsx.standard._version\n print(pkg_name+' - ' + i._version.__version__)\n installed_packages[pkg_name] = i._version.__version__\n else:\n print(pkg_name+' - ' + i.__version__)\n installed_packages[pkg_name] = i.__version__\n except ImportError as error:\n print(pkg_name + ' NOT INSTALLED')\n return installed_packages", "def get_versions() -> dict:\n\n versions = {}\n try:\n import bel.__version__\n versions['bel_python_package'] = bel.__version__.__version__\n except ModuleNotFoundError:\n pass\n\n try:\n import __version__\n if __version__.__name__ == 'BELBIO API':\n versions['bel_api'] = __version__.__version__\n except ModuleNotFoundError:\n pass\n\n return versions", "def get_qcodes_requirements_versions() -> Dict[str, str]:\n\n req_names = get_qcodes_requirements()\n\n req_modules = []\n\n for req_name in req_names:\n if req_name in _BACKPORTED_PACKAGES:\n pass\n elif req_name in _IMPORT_NAMES:\n req_modules.append(_IMPORT_NAMES[req_name])\n else:\n req_modules.append(req_name)\n\n req_versions = {}\n\n for req_module in req_modules:\n mod = importlib.import_module(req_module)\n if req_module in _PACKAGE_NAMES:\n req_pkg = _PACKAGE_NAMES[req_module]\n else:\n req_pkg = req_module\n req_versions.update({req_pkg: mod.__version__}) # type: ignore\n\n return req_versions", "def min_version(self):\n data = self.version_downloads\n if not data:\n return (None, 0)\n return min(data.items(), key=lambda item: item[1])", "def get_versions():\n return [version for version in get_version_list() if has_package(version)]", "def get_required_packages(self):\n return _copy.copy(self._data['packages'])", "def get_package_properties(setup_py_path):\n pkgName, version, _, requires = parse_setup(setup_py_path)\n is_new_sdk = pkgName in NEW_REQ_PACKAGES or any(map(lambda x: (parse_require(x)[0] in NEW_REQ_PACKAGES), requires))\n return pkgName, version, is_new_sdk, setup_py_path", "def _get_package_versions_map(self, name):\n assert name\n normalized_name = NameVer.normalize_name(name)\n versions = self.packages[normalized_name]\n if not versions and normalized_name not in self.fetched_package_normalized_names:\n self.fetched_package_normalized_names.add(normalized_name)\n try:\n links = self.fetch_links(normalized_name=normalized_name)\n # note that thsi is sorted so the mapping is also sorted\n versions = {\n package.version: package\n for package in PypiPackage.packages_from_many_paths_or_urls(paths_or_urls=links)\n }\n self.packages[normalized_name] = versions\n except RemoteNotFetchedException as e:\n if TRACE:\n print(f\"failed to fetch package name: {name} from: {self.index_url}:\\n{e}\")\n\n if not versions and TRACE:\n print(f\"WARNING: package {name} not found in repo: {self.index_url}\")\n\n return versions", "def 
get_minimum_version(requirement):\n if not requirement.specs:\n warnings.warn(f\"No version specifier for {requirement.name} in \"\n \"install_requires. Using lowest available version on PyPi.\",\n stacklevel=2)\n content = urllib.request.urlopen(f'https://pypi.python.org/pypi/{requirement.name}/json').read()\n versions = sorted([pkg_resources.parse_version(v) for v in json.loads(content)[\"releases\"].keys()])\n for version in versions:\n if version in requirement:\n # If the requirement does not list any version, the lowest will be\n # returned\n return version\n # If the specified version does not exist on PyPi, issue a warning\n # and return the lowest available version\n warnings.warn(f\"Exact version specified in {requirement} not found \"\n \"on PyPi. Using lowest available version.\", stacklevel=2)\n return versions[0]", "def get_installedpackages_requires(package_list):\n\n # TODO finish me\n\n return []", "def analyze_versions(crate_data):\n def get_major(semver):\n if semver is None:\n return None\n digits = semver.lstrip(\"^\").split(\".\")\n if digits[0] != \"0\":\n return digits[0]\n else:\n return \"0.{}\".format(digits[1])\n dependencies = defaultdict(dict)\n versions = defaultdict(set)\n # Fill datastructure first.\n for data in crate_data:\n for dependency in data['dependencies'] + data['dev-dependencies']:\n dependencies[dependency['name']][data['name']] = get_major(dependency['version'])\n versions[dependency['name']].add(get_major(dependency['version']))\n\n for (dependency, version_set) in versions.items():\n if len(version_set) == 1:\n dependencies.pop(dependency)\n\n return dependencies", "def min_sdk(self):\n return self._get_sdk(\"min\")", "def list(self, package_name: str, include_pre: bool = False):\n versions = find_versions(package_name, include_pre)\n for v in sorted(set(v.version for v in versions)):\n print(v)", "def filter_old_versions(versions, minimum):\n min_major, min_minor = map(int, minimum.split('.'))\n\n for version in versions:\n major, minor, patch = map(int, version.split('.'))\n\n if major < min_major:\n continue\n\n if major == min_major and minor < min_minor:\n continue\n\n yield version", "def installed_packages(self):\n for package, st_dict in self._data['packages'].items():\n if st_dict['status'] == Status.INSTALLED.name:\n yield package", "def pkg_version(package):\n run(\"%sbin/pip freeze | grep %s\" % (venv, package))", "def test_requirement_versions():\n request = requests.get(\n \"https://raw.githubusercontent.com/home-assistant/home-assistant/dev/requirements_all.txt\"\n )\n requirements = {}\n for line in request.text.split(\"\\n\"):\n if \"=\" in line and not \"#\" in line:\n package = line.split(\">\")[0].split(\"=\")[0]\n version = line.split(\"=\")[-1]\n requirements[package] = version\n\n with open(MANIFEST_FILE, \"r\") as manifest_file:\n for line in json.loads(manifest_file.read())[\"requirements\"]:\n package = line.split(\">\")[0].split(\"=\")[0]\n version = line.split(\"=\")[-1]\n if package in requirements:\n if version != requirements[package]:\n warnings.warn(\n \"Package has different version from HA, this might casuse problems\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves minimum version requirement for the given package.
def return_min_version_python_package(package):
    version_package = min_python_module_version()
    return (version_package[package])
[ "def get_minimum_version(requirement):\n if not requirement.specs:\n warnings.warn(f\"No version specifier for {requirement.name} in \"\n \"install_requires. Using lowest available version on PyPi.\",\n stacklevel=2)\n content = urllib.request.urlopen(f'https://pypi.python.org/pypi/{requirement.name}/json').read()\n versions = sorted([pkg_resources.parse_version(v) for v in json.loads(content)[\"releases\"].keys()])\n for version in versions:\n if version in requirement:\n # If the requirement does not list any version, the lowest will be\n # returned\n return version\n # If the specified version does not exist on PyPi, issue a warning\n # and return the lowest available version\n warnings.warn(f\"Exact version specified in {requirement} not found \"\n \"on PyPi. Using lowest available version.\", stacklevel=2)\n return versions[0]", "def min_python_module_version():\r\n\t## read from file: prog2default.csv\r\n\tpython_modules = file_list(\"python_requirements\")\r\n\tpackage_min_versions = HCGB_main.file2dictionary(python_modules, \",\")\r\n\r\n\treturn(package_min_versions)", "def GetPackageVersion(self,package):\n\n print 'Get current package version of ' + str(package)\n\n ## Check that the nd280/v#r#p#/ directory is there \n nd280Dir=self.instDir + '/nd280/' + self.nd280ver \n if not os.path.isdir(nd280Dir):\n raise Error(nd280Dir + ' does not exist, so am unable to get version of ' + package)\n return 1\n\n # find the requirements file that enables this module\n command= \"grep -e \\\"use \"+package+\"[[:space:]]*v\\\" \"+self.instDir+\"/*/*/cmt/requirements\"\n lines,errors = runLCG(command,is_pexpect=False)\n if not lines or errors:\n print \"ERROR: unable to locate requirents for \"+package\n return 1\n\n version = lines[0].strip().split(' ')[-1]\n print 'Found '+package+' version '+version\n\n return version", "def min_version(self):\n data = self.version_downloads\n if not data:\n return (None, 0)\n return min(data.items(), key=lambda item: item[1])", "def get_version(package):\r\n init_py = open(os.path.join(package, '__init__.py')).read()\r\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)", "def version(package):\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)", "def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.split(\" = \", 1)\n version = version.replace(\"'\", \"\").strip()\n print(version)\n return version", "def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver", "def get_package_version(self, name, version=None):\n if not version:\n versions = list(self._get_package_versions_map(name).values())\n return versions and versions[-1]\n else:\n return self._get_package_versions_map(name).get(version)", "def pkg_version(package):\n run(\"%sbin/pip freeze | grep %s\" % (venv, package))", "def getPackageVersion(package_info):\n\n # Parse for version_number\n package_version = re.search(version_pattern, package_info).group(0) # extract version_number\n\n return package_version", "def min_version(the_module, min_version_str: str = \"\") -> bool:\n if 
min_version_str:\n mod_version = tuple(int(x) for x in the_module.__version__.split(\".\")[:2])\n required = tuple(int(x) for x in min_version_str.split(\".\")[:2])\n return mod_version >= required\n return True # always valid version", "def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version", "def get_version():\n import pkg_resources # part of setuptools\n return pkg_resources.require(\"mbed-ls\")[0].version", "def check_module(name, min_version=None):\n\n name = '{}'.format(name)\n try:\n the_module = importlib.import_module(name)\n except ImportError:\n tf.logging.info(\n 'Optional Python module %s not found, '\n 'please install %s and retry if the application fails.',\n name, name)\n raise\n\n try:\n if min_version is not None:\n assert the_module.__version__ >= '{}'.format(min_version)\n except AttributeError:\n pass\n except AssertionError:\n tf.logging.info(\n 'Optional Python module %s version %s not found, '\n 'please install %s-%s and retry if the application fails.',\n name, min_version, name, min_version)\n raise", "def write_minimum_requirements_file():\n dist = pkg_resources.get_distribution('jwst')\n\n with open(\"requirements-min.txt\", \"w\") as fd:\n for requirement in dist.requires():\n if requirement.url is None:\n version = get_minimum_version(requirement)\n fd.write(f'{requirement.name}=={version}\\n')\n else:\n fd.write(f'{requirement}\\n')", "def get_latest_verobj(pkg):\n try:\n ver = pkg.versions[0]\n except AttributeError:\n return None\n\n return ver", "def minver_error(pkg_name):\n print(\n 'ERROR: specify minimal version of \"{0}\" using '\n '\">=\" or \"==\"'.format(pkg_name),\n file=sys.stderr\n )\n sys.exit(1)", "def get_ecr_vulnerability_package_version(vulnerability):\n for attribute in vulnerability[\"attributes\"]:\n if attribute[\"key\"] == \"package_version\":\n return attribute[\"value\"]\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a book to the shelf if there is room.
def AddBook(self, book):
    thickness = book.GetThickness()
    if self.__available_capacity >= thickness:
        self.__books[book.GetTitle()] = book
        self._ReduceCapacity(thickness)
    else:
        raise RuntimeError("Add failed: No space available on shelf.")
[ "def add_book(self, book: Book):\n self.books.append(book)", "def add_book(name):\n if check_book_in_library(name):\n msg = f\"{name} is already in the Library\"\n else:\n add_book_to_library(name)\n msg = f\"{name} added to the Library\"\n click.echo(msg)", "def create_book(self, title, ident):\n\n new_book = item.Book(title, ident)\n\n self.library_controller.add_item(new_book)", "def add_book():\n request_data = request.get_json()\n if is_valid_book_object(request_data):\n created_book = Book(request_data[\"type\"], request_data[\"title\"], request_data[\"creation_date\"])\n all_books.insert(0, created_book)\n response = Response(\"Successfully added!\", status=201, mimetype=\"application/json\")\n new_book_id = [book.id for book in all_books if book.title == request_data[\"title\"]]\n response.headers['Location'] = \"/v1/books/info/\" + new_book_id[0]\n app.logger.info(\"Book with id = {} was added\".format(new_book_id[0]))\n return response\n else:\n error_message = \"You passing an invalid book\"\n response = Response(error_message, status=406, mimetype=\"application/json\")\n app.logger.warning(\"Invalid book want to be passed: {}\".format(str(request_data)))\n return response", "def buy_book(self, book):\r\n self._balance += books[book]\r\n self._library += Book(book)", "def addBook(self, adding):\n if self.book >= 3:\n print(\"Can't borrow more books--MAX REACHED!\\n\")\n return \"Can't borrow more books--MAX REACHED!\"\n# elif adding.status == 0:\n# wait_list()\n elif adding.status == 1 and self.book < 3:\n self.book = self.book + 1\n if adding.catalog not in self.holding:\n self.holding.append(adding.catalog)\n else:\n adding.status = 0\n Book.library_members[self.name] = self.book\n Book.waiting[self.name] = Book.library_members[self.name]\n return self.holding\n elif adding.status == 0:\n Book.members_waiting.append(self.name)\n Patron.waitList()\n else:\n return \"error. cannot add\"", "def order_add_book(request, book):\n try:\n # now add this book to the current order and save it\n book.order = request.session['order']\n book.save()\n except KeyError:\n # there is no current order\n print(\"Tried to add a book to current order, but there isn't a current order\")\n raise KeyError", "def lendBook(self, book, libraryMember):\r\n if book in self.catalogue:\r\n if book.checkBookAvailibility() == True:\r\n if libraryMember.numberOfBook() < self.maxBookNumber:\r\n book.setLibraryMember(libraryMember)\r\n libraryMember.addBook(book)\r\n if libraryMember not in self.libraryMemberCurrentlyBorrowingBookList:\r\n self.libraryMemberCurrentlyBorrowingBookList.append(libraryMember)\r\n else:\r\n print(\"Error! The library member is currently borrowing 5 books, which is the maximum limit.\")\r\n else:\r\n print(\"Error! The book is currently unavailible.\")\r\n else:\r\n print(\"Error! 
The book is not in the catalogue.\")", "def add_new_book(self, in_title, in_author):\n title = in_title.lower()\n author = in_author.lower()\n if title and not title.isspace() and author and not author.isspace():\n if any(charecters.isdigit() for charecters in author)== True:\n return \"Write letters as author\"\n else:\n if (any(charecters.isalpha() for charecters in title) or any(characters.isdigit() for characters in title))== False or any(characters.isalpha() for characters in author)== False:\n return \"Fill in author AND title\"\n else:\n new_book = True\n for book in self.booklist:\n if book.title == title and book.author == author:\n return \"The book already exsists\"\n if new_book:\n self.booklist.append(Book(title+\"\", author+\"\", \"avalible\", \"nothing\", \"notimeset\"))\n return \"The book is now added\"\n else:\n return \"Fill in title AND author\"", "def create(self, book):\n return super(BookRepository, self).create(book)", "def checkAddBooking(self, booking):\n # first, check if booking is already expired\n if time.time() > booking['end']:\n return\n # check if booking is in database already\n b_id = (booking['id'], )\n self.cursor.execute(\"SELECT * FROM bookings WHERE id=?\", b_id)\n if len(self.cursor.fetchall()) > 0: # booking already in the db\n return\n tup = (\n booking['id'],\n booking['resource_id'],\n booking['start'],\n booking['end'],\n booking['installer_name'],\n booking['scenario_name'],\n booking['purpose'],\n 0,\n ''\n )\n self.cursor.execute(\n \"INSERT INTO bookings VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", tup)\n self.database.commit()", "def add_book_to_db(book: dict) -> None:\n if \"title\" in book:\n title = request.form['title']\n else:\n title = \"\"\n\n if \"authors\" in book:\n authors = \";\\n\".join(request.form['authors'].split(';'))\n else:\n authors = \"\"\n\n if \"publishedDate\" in book:\n published_date = request.form['publishedDate']\n else:\n published_date = \"\"\n\n if \"\" in book:\n industry_identifiers = request.form['industryIdentifiers']\n single_identifiers = industry_identifiers.split(';')\n industry_identifiers = \";\\n\".join([f\"{i.split(',')[0]}({i.split(',')[1]})\\n\" for i in single_identifiers])\n else:\n industry_identifiers = \"\"\n\n page_count = request.form['pageCount']\n links = \";\\n\".join(request.form['links'].split(','))\n languages = \";\\n\".join(request.form['languages'].split(','))\n\n book = Book(title=title,\n authors=authors,\n publishedDate=published_date,\n industryIdentifiers=industry_identifiers,\n pageCount=page_count,\n imageLinks=links,\n language=languages\n )\n\n DATABASE.session.add(book)\n DATABASE.session.commit()", "def book(self):\r\n if len(self.name_entry.get()) < 2:\r\n showerror('M.Y. Hotel', 'Reconsider Name field !')\r\n return\r\n elif self.check_in_cal.get_date() == self.check_out_cal.get_date() or self.check_out_cal.get_date() < \\\r\n self.check_in_cal.get_date():\r\n showerror('M.Y. Hotel', 'Reconsider Check-in - Check-out Dates !\\nNOTE: You can\\'t time travel OR jo'\r\n 'in and leave on the same day!')\r\n return\r\n elif len(self.phone_entry.get()) < 7:\r\n showerror('M.Y. Hotel', 'Reconsider Phone No field !')\r\n return\r\n elif len(self.rnum_entry.get()) < 1:\r\n showerror('M.Y. Hotel', 'Room No not entered !')\r\n return\r\n elif self.id is None:\r\n showerror('M.Y. 
Hotel', 'BOOK ID not generated !')\r\n return\r\n cursor.execute('SELECT ROOM_NO FROM HOTEL_INFO;')\r\n data = cursor.fetchall()\r\n b_rooms = []\r\n for i in range(len(data)):\r\n b_rooms.append(data[i][0])\r\n if int(self.rnum_entry.get()) in b_rooms: # if room already booked\r\n showerror('M.Y. Hotel', 'Room already occupied')\r\n return\r\n cursor.execute(f'SELECT ROOM_TYPE FROM HOTEL_INFO WHERE ROOM_TYPE=\"{self.room_combo.get()}\"')\r\n data = cursor.fetchall()\r\n num_booked = len(data)\r\n avail_rooms = int(creds_rtypes['h_type'][creds_rtypes['sel_h_type']][room_t.index(self.room_combo.get())]) - \\\r\n num_booked\r\n if avail_rooms < 1:\r\n showerror('M.Y. Hotel', f'No more {self.room_combo.get()} rooms available !')\r\n return\r\n org_price = int(creds_rtypes['h_r_price'][creds_rtypes['sel_h_type']][int(room_t.index(self.room_combo.get()))])\r\n tax = (int(creds_rtypes['tax'][list(creds_rtypes['tax'].keys())[0]]) / 100) * org_price + \\\r\n (int(creds_rtypes['tax'][list(creds_rtypes['tax'].keys())[1]]) / 100) * org_price + \\\r\n (int(creds_rtypes['tax'][list(creds_rtypes['tax'].keys())[2]]) / 100) * org_price\r\n self.price.insert(0, f'{int(round(org_price + tax, -1))}')\r\n\r\n def conf():\r\n \"\"\"Confirmation function for booking room\"\"\"\r\n ans = askyesno('M.Y. Hotel', f'Confirm if you want to book the room (ID: {self.id}) ?')\r\n if ans > 0:\r\n self.confirm_booking()\r\n pass\r\n else:\r\n return\r\n\r\n self.after(2000, conf)", "def user_add_book_to_wishlist_by_id(self, book_id, user_num):\n c = self.db.cursor()\n # Get the wishlist ID\n c.execute(\"\"\"SELECT id FROM Wishlists WHERE userId = ?;\"\"\", (user_num,))\n wishlist_id = c.fetchone()['id']\n\n # If the book is already in the wishlist, don't add it\n c.execute(\"\"\"SELECT * FROM WishlistsBooks WHERE wishlistId = ? 
AND bookId = ?;\"\"\", (wishlist_id, book_id))\n if c.fetchall():\n flash(\"Book already in your wishlist\", \"warning\")\n log.warning(f\"Book {book_id} already in \" +\n f\"user {user_num}'s wishlist\")\n # otherwise, add book to the wishlist\n else:\n c.execute(\"\"\"INSERT INTO WishlistsBooks (wishlistId, bookId) VALUES (?, ?);\"\"\", (wishlist_id, book_id))\n self.db.commit()\n flash(\"Book successfully added to your wishlist\", \"success\")\n log.info(f\"Book {book_id} added to wishlist {wishlist_id}\")", "def insert_book(self,isbn,uid=None):\n try:\n if not uid:\n uid=1\n book = self.get_book_byisbn(isbn)\n if book and book.id:\n #check if it's already in user book list?\n sqls=\"select 1 FROM %s WHERE `uid`=%d and `bid`=%d\" %(TABLE_USERBOOK,uid,book.id)\n\n result=db.query(sqls)\n\n if result:\n logger.debug((\"already exist:\",isbn))\n return \n else:\n self.add_userbook(uid,book.id)\n else:\n book = self.get_book_byisbn_fromremote(isbn)\n \n if book :\n t=db.transaction()\n bid = self.create_book(book)\n if bid:\n self.add_userbook(uid,bid)\n else:\n logger.warn(('failed to get bid:', bid))\n t.commit()\n else:\n logger.warn(('book not returned:%s' % isbn))\n except Exception,e:\n logger.error(e)", "def RemoveBook(self, title):\n stored_title = book.Book.TransformTitle(title)\n if stored_title in self.__books:\n stored_book = self.__books[stored_title]\n thickness = stored_book.GetThickness()\n del self.__books[stored_title]\n self._IncreaseCapacity(thickness)\n else:\n raise RuntimeError(\"Removal failed: Book not found in shelf.\")", "def rent_book(self, user, book):\n if book.in_stock > 0:\n # get rental on this book by the user\n try:\n r = self.get_rental(user, book)\n # if there is a rental by the user, raise a custom exception\n raise RentalExists(\"Book %s is already rented by %s\" % (book.title, user.username))\n except Rental.DoesNotExist:\n # if there is none create a new rental\n r = Rental.objects.create(book=book, rented_by=user)\n r.save()\n # remove the reservation if it exists\n Reservation.objects.remove_reservation(user, book)\n book.in_stock -= 1\n book.save()\n else:\n # if the book isn't in stock raise a custom exception\n raise BookNotInStock(\"Book %s is out of stock!\" % book.title)", "def add_room():\n print(\"ADD A ROOM\".center(80))\n print(\"-\".center(80, '-'))\n room = str(_get_room_name())\n db.execute('INSERT into room (name) VALUES (?)', (room,))\n db.commit()\n display_menu()", "def add_book_to_library(book_name):\n book_name = book_name.title()\n file[book_name] = {\"title\": book_name, \"return_date\": None}\n with open('personal_library.json', 'w') as f:\n json.dump(file, f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a book from the shelf if it resides on the shelf.
def RemoveBook(self, title):
    stored_title = book.Book.TransformTitle(title)
    if stored_title in self.__books:
        stored_book = self.__books[stored_title]
        thickness = stored_book.GetThickness()
        del self.__books[stored_title]
        self._IncreaseCapacity(thickness)
    else:
        raise RuntimeError("Removal failed: Book not found in shelf.")
[ "def remove_book(self, book: Book):\n self.books.remove(book)", "def remove_book(self):\n \n try:\n self.clr_scr()\n serial_no=input(\"Enter serial number of book:\\t\\t\") #enter serial_no of book you want to delete.\n Library.library.pop(serial_no,\"No such item to delete\")\n print(\"\\n\\n\")\n print('****************Book removed successfuly from library database.*********************')\n time.sleep(1)\n return self.main_menu()\n \n except Exception as msg:\n print(\"ERROR------->>>>>>\",msg)", "def remove_book(self, in_title, in_author):\n title=in_title.lower()\n author=in_author.lower()\n if title and not title.isspace() and author and not author.isspace():\n for book in self.booklist:\n if book.title==title and book.author==author and book.status==\"avalible\":\n self.booklist.remove(book)\n return(\"The book is now deleted\")\n elif book.title==title and book.author==author and book.status==\"borrowed\":\n return(\"The book must be retured back, can therefor not be removed.\")\n else:\n return(\"Book not found.\")\n else:\n return \"Fill in title AND author\"", "def delete_book(name):\n if check_book_in_library(name):\n delete_book_from_library(name)\n msg = f\"Deleted {name} from the library\"\n else:\n msg = (\"\" if name == \"\" else f\"{name} was not found in the Library\")\n click.echo(msg)", "def order_remove_book(request, book_pk):\n try:\n book = get_object_or_404(Book, pk=book_pk)\n if book.order == request.session['order']:\n book.delete()\n else:\n raise Exception(\"Tried to remove a book from the current order that wasn't in the current order\")\n except KeyError:\n logging.info(\"Tried to remove a book from the current order, but there isn't a current order\")\n raise\n\n return order_render_as_response(request)", "def del_bookmark(book_name):\n book_path = os.path.join(BOOKMARK_DIRECTORY, book_name)\n if os.path.isfile(book_path):\n os.remove(book_path)\n if '/' in book_name:\n book_sub = book_name.split('/')[:-1]\n if len(book_sub) > 0:\n for n in range(len(book_sub)):\n cs = ''.join(sd + '/' for sd in book_sub[:len(book_sub)-n])\n subpath = os.path.join(BOOKMARK_DIRECTORY, cs)\n if len(os.listdir(subpath)) < 1:\n os.rmdir(subpath)", "def deleteCodebook(self):\n if not self.delete_alert(message = \"This will delete the codebook as well as ALL ENTRIES contained within.\\nAre you sure? 
I mean, realllly sure?\"):\n return\n\n # get codebook directory\n cb_name, cb_dir = self.getCurrentCodebook()\n\n # remove the codebook from tabs\n self.entryTabs.clear()\n self.entryName.setText('')\n self.codebookTabs.removeTab(self.codebookTabs.currentIndex())\n\n # delete it\n shutil.rmtree(cb_dir)\n\n # remove the codebook from settings\n self.settings['open_codebooks'].remove(cb_name)\n self.settings['codebooks'].pop(cb_name)\n self.saveSettings()\n\n pass", "def delete_book_from_library(book_name):\n book_name = book_name.title()\n file.pop(book_name)\n with open('personal_library.json', 'w') as f:\n json.dump(file, f)", "def on_booklist_delete_clicked(self, obj):\n store, the_iter = self.blist.get_selected()\n if not the_iter:\n return\n data = self.blist.get_data(the_iter, [0])\n self.booklist.delete_book(cuni(data[0]))\n self.blist.remove(the_iter)\n self.unsaved_changes = True\n self.top.run()", "def delete_book(book_id):\n if len(MyLibrary.books) <= book_id or book_id < 0:\n abort(404)\n book = [MyLibrary.books[book_id]]\n MyLibrary.DeleteBook(book)\n return jsonify({'result': True})", "def remove(self, title):\n\t\tif title not in self.titles:\n\t\t\treturn\n\t\tfor bookmark in self.bookmarks:\n\t\t\tif bookmark.get(\"URIDictionary\") and bookmark[\"URIDictionary\"][\"title\"] == title:\n\t\t\t\tself.titles.remove(title)\n\t\t\t\tself.bookmarks.remove(bookmark)\n\t\t\t\treturn", "def delete_book(id):\n book = Book.query.get(id)\n if book is None:\n return jsonify({\"message\": \"ID does not exist!\"}), 400\n else:\n db.session.delete(book)\n db.session.commit()\n return book_Schema.jsonify(book)", "def remove_userbook(self,uid,bid):\n sqls=\"DELETE FROM %s WHERE `uid`=%d and `bid`=%d\" %(TABLE_USERBOOK,uid,bid)\n db.query(sqls)", "def removeEvent(self, bookID):\n try:\n listEvents = self.service.events().list(calendarId = \"primary\", q = bookID).execute()\n getEvents = listEvents.get(\"items\", [])\n for event in getEvents:\n eventId = event['id']\n event = self.service.events().delete(calendarId='primary', eventId= eventId).execute()\n print('Event removed from calendar')\n except:\n print(\"Doesn't exist on calendar\")\n pass", "def AddBook(self, book):\n thickness = book.GetThickness()\n if self.__available_capacity >= thickness:\n self.__books[book.GetTitle()] = book\n self._ReduceCapacity(thickness)\n else:\n raise RuntimeError(\"Add failed: No space available on shelf.\")", "def remove_reservation(self, r_id=None, user=None, book=None):\n try: # if the reservation exists remove it\n if user is not None:\n reservation = self.get(reserved_by=user, book=book)\n reservation = self.get(pk=r_id)\n reservation.delete()\n except Reservation.DoesNotExist: # else die quetly\n pass", "def removeBooking(self, idNum):\n booking_id = (idNum, )\n self.cursor.execute(\"DELETE FROM bookings WHERE id=?\", booking_id)", "def del_Book(self, Book_id):\n command = u\"\"\"self.cur.execute(\"DELETE FROM Book WHERE Book_id = %s\")\"\"\" % Book_id\n #print command\n exec(command)", "def removeResourceByObject(self, resource):\r\n if resource in self.catalogue:\r\n self.catalogue.remove(resource)\r\n else:\r\n print(\"Error! resource is not in the catalogue.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of books on the shelf.
def GetBookCount(self):
    return len(self.__books)
[ "def number_of_bookings(bookings_registry=DEFAULT_BOOKING_REGISTRY):\n return _db.number_of_items(Booking, bookings_registry)", "def getNumberOfRecipes():\n\n with sessionInstance() as session:\n return session.query(models.Recipe).count()", "def getNumEntries(self) -> \"int\":\n return _coin.SoNodekitCatalog_getNumEntries(self)", "def count(self):\n info = self.describe()\n return info['Table'].get('ItemCount', 0)", "def get_count(self):\n return self.hand.compute_bj_count()", "def numberOfResource(self):\r\n return len(self.catalogue)", "def instances_count(self):\n return self.bookinstance_set.count()", "def num_pages(self):\n return int(self._book_dict[\"num_pages\"])", "def count(self, isbn):\n if isbn in self.isbns:\n return self.isbns[isbn]\n\n return 0", "def get_number_of_sheets(self):\n return self.workbook.nsheets", "def num_items(bin):\n if bin not in bin_contents: return 0\n return len(bin_contents[bin])", "def get_number_of_entries(self):\n return self.mongo_db_service.entries", "def get_item_count(key=DEFAULT_KEY):\n client = get_redis_client()\n \n return client.llen(DEFAULT_KEY)", "def get_total_bets():\n\n return TABLE_BETS['id'].count()", "def _get_count(self) -> \"size_t\" :\n return _core.FavoriteMaterials__get_count(self)", "def _get_count(self) -> \"size_t\" :\n return _core.ProductPreferencesCollection__get_count(self)", "def getNumHistories(self) -> \"int\":\n return _coin.ScXMLStateElt_getNumHistories(self)", "def getNumHistories(self) -> \"int\":\n return _coin.ScXMLParallelElt_getNumHistories(self)", "def get_number_of_trucks(self):\n return len(self.trucks)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the initial capacity of the shelf.
def GetInitialCapacity(self):
    return self.__length
[ "def initial_size(self):\n ret = self._get_attr(\"initialSize\")\n return ret", "def capacity(cls):\n return WellPlate96.size() // PreyStoragePlate.capacity()", "def min_capacity(self) -> jsii.Number:\n return self._values.get(\"min_capacity\")", "def capacity(self):\n return", "def base_capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"base_capacity\")", "def capacity(self):\n return self.curr_capacity", "def base_capacity(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"base_capacity\")", "def capacity(self):\n return self.storage.size()", "def get_hypervisor_capacity(self, hypervisor, flavor, check=True):\n if hypervisor.vcpus < flavor.vcpus:\n capacity = 0\n elif flavor.disk > 0:\n capacity = min(\n hypervisor.disk_available_least // flavor.disk,\n hypervisor.free_ram_mb // flavor.ram)\n else:\n capacity = hypervisor.free_ram_mb // flavor.ram\n\n if check:\n assert_that(capacity, greater_than(0))\n\n return capacity", "def allocated_space(self):\n size = Size(0)\n\n if not self.partitions:\n return size\n\n for part in self.partitions:\n if part.percent_string:\n continue\n size += part.size\n\n return size", "def capacity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> int:\n return sys.maxsize", "def get_minimum_part_size(self):\n return self._get_account_info_or_raise('minimum_part_size')", "def capacity(self):\n return self._get_val_float(_VG_CAPACITY)", "def getCapacityFactor(self): \n return self.capFact", "def preferred_disk_block_size(self):\n return self._preferred_disk_block_size", "def size(self):\n try:\n # IF THIS DOESN'T WORK, CLIENT IS DISCONNECTED\n temp = plasma.ObjectID.from_random()\n self.client.put(5, temp)\n self.client.delete([temp])\n except:\n traceback.print_exc()\n #raise BrainClientDisconnectedError\n self.bytes = self.client.store_capacity()\n self.mb = \"{} MB\".format(round(self.bytes / 1000000))\n return self.bytes", "def remaining_capacity(self):\n for k in range(len(self.listPeople)):\n self.pounds = self.pounds + self.listPeople[k].weight\n return self.capacity - self.pounds", "def remaining_capacity(self):\n return self.__remaining_capacity" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reduces shelf capacity (after a book is added).
def _ReduceCapacity(self, thickness):
    self.__available_capacity -= thickness
[ "def AddBook(self, book):\n thickness = book.GetThickness()\n if self.__available_capacity >= thickness:\n self.__books[book.GetTitle()] = book\n self._ReduceCapacity(thickness)\n else:\n raise RuntimeError(\"Add failed: No space available on shelf.\")", "def _reduce_quantity_goods(cls, name, quantity):\n cls.warehouse[name.lower()]['_quantity'] -= quantity", "def change_bucketWidth(self, newWidth):\n res = []\n for k,v in self.buckets.items():\n res.extend(v)\n self.bucketWidth = newWidth\n self.data = res\n self._distributeElementsIntoBuckets()", "def RemoveBook(self, title):\n stored_title = book.Book.TransformTitle(title)\n if stored_title in self.__books:\n stored_book = self.__books[stored_title]\n thickness = stored_book.GetThickness()\n del self.__books[stored_title]\n self._IncreaseCapacity(thickness)\n else:\n raise RuntimeError(\"Removal failed: Book not found in shelf.\")", "def remove_reserve(self):\r\n self._reserves -= 1", "def take_item(self):\n if (self.quantity > 0):\n self.quantity -= 1\n else:\n raise OutOfStock", "def add_reserve(self):\r\n self._reserves += 1", "def stretch_and_unstretch(rec, amount):\n print(\" Stretch-Unstretch Bitcrusher, {0}%...\".format(amount))\n rec.stretch(1/amount)\n rec.stretch(amount)", "def add_reserve_piece(self):\n self._reserve_pieces += 1", "def _resize(self, new_size=None):\n # If unspecified, choose new size dynamically based on current size\n if new_size is None:\n new_size = len(self.buckets) * 2 # Double size \n # Option to reduce size if buckets are sparsely filled (low load factor)\n elif new_size is 0:\n new_size = len(self.buckets) / 2 # Half size\n # Get a list to temporarily hold all current key-value entries\n entries = self.items() # O(n)\n # Create a new list of new_size total empty linked list buckets\n # Insert each key-value entry into the new list of buckets,\n # which will rehash them into a new bucket index based on the new size\n self.__init__(new_size, entries) # O(n)", "def saveShelf():\n pass", "def shelf():\r\n\r\n return box(pos=vector(0, 0.1/2, 0), size=vector(2, 0.1, 2), color=color.white)", "def _resize(self):\n\n # get a list of all items in the hash table\n items = self.get_items()\n\n # reset size for hash table\n self.size = 0\n\n # generate new slots of double current slots\n self.slots = [LinkedList() for i in range(len(self.slots) * 2)]\n\n # rehash each item\n for key, value in items:\n self.set(key, value)", "def add_node(self, node):\n super(Bucket, self).add_node(node)\n self.adjust_capacity_up(node.free_capacity)", "def reduce_health(self, amount):\r\n self.current_health -= amount", "def _resize(self, cap): #assume cap >= len(self)\n\t\told = self._data #keep track of existing list\n\t\tself._data = [None] * cap #allocate list with new capacity\n\t\twalk = self._front\n\t\tfor k in range(self._size): #only consider existing elements\n\t\t\tself._data[k] = old[walk] #intentionally shift indices\n\t\t\twalk = (1 + walk) % len(old) #use old size as modulus\n\t\tself._front = 0 #front has been aligned", "def adjust_health(self,ammount):\n self.health += ammount", "def upgrade_battery(self):\n\t\tif self.battery_size < 85 or self.battery_size > 85:\n\t\t\tself.battery_size = 85", "def remaining_capacity(self):\n for k in range(len(self.listPeople)):\n self.pounds = self.pounds + self.listPeople[k].weight\n return self.capacity - self.pounds" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that applies a simple one-reactant one-product reaction SMILES to a list of input RDKit molecules, returning the products as a list of RDKit molecules.
def simple_rxn(mol_list, rxn, debug=False):
    prod_list = []
    for mol in mol_list:
        if debug:
            logging.info('Input: '+ MolToSmiles(mol))
        products = rxn.RunReactants((Chem.AddHs(mol),))
        if debug:
            logging.info('Products: {}'.format(products))
        if products != ():
            for prod in products:
                if debug:
                    logging.info(prod)
                    logging.info(MolToSmiles(prod[0]))
                prod_list.append(prod[0])
    return prod_list
[ "def pair_rxnts(mol1_list, mol2_list, rxn, debug=False): \n prod_list = []\n for mol1 in mol1_list:\n for mol2 in mol2_list:\n\n products = rxn.RunReactants((Chem.AddHs(mol1),Chem.AddHs(mol2)))\n if debug:\n logging.info(products)\n if products != ():\n for prod in products:\n if debug:\n logging.info(MolToSmiles(prod[0]))\n prod_list.append(prod[0])\n return prod_list", "def _list_product(l):\n return reduce(lambda x, y: x*y, l, 1)", "def sustitute(prod,prodList):\n\t\tprint('i Receive')\n\t\tprint('prod:')\n\t\tprint(prod)\n\t\tprint('prodList:')\n\t\tprint(prodList)\n\t\tnewList=prodList[0:len(prodList)]\n\t\tfor strng in prodList: \t\t\t\t\t\t\t\n\t\t\tif prod.Left[0] in strng:\t\n\t\t\t\tnewList.remove(strng)\n\t\t\t\tfor der in prod.Right:\n\t\t\t\t\tnewList.append(strng.replace(prod.Left[0],der))\n\t\tprint('Result:')\n\t\tprint (newList)\n\n\t\treturn newList", "def _split_reaction_monom(reaction, species):\n ratenumer, ratedenom = reaction.rate.cancel().as_numer_denom()\n ratenumer = ratenumer.expand()\n species = map(sp.Symbol, species)\n ratendict = sp.Poly(ratenumer, *species).as_dict()\n if len(ratendict) > 1:\n reactions = []\n\n i = 0\n for degrees in ratendict:\n i = i + 1\n ratenpart = sp.Mul(*[species[r]**degrees[r] for r in range(len(species))]) * ratendict[degrees]\n reactions.append(Reaction(reaction.reactionid + \"_\" + str(i), \\\n reaction.reactant, \\\n reaction.product, \\\n ratenpart / ratedenom))\n return reactions\n return [reaction]", "def product(*args):\n return map(''.join, itertools.product(*args))", "def test_add_atom_labels_for_reaction_r_recombination(self):\n reactants = [Species().from_smiles('C[CH2]'), Species().from_smiles('[CH3]')]\n products = [Species().from_smiles('CCC')]\n\n reaction = TemplateReaction(reactants=reactants, products=products)\n\n self.database.kinetics.families['R_Recombination'].add_atom_labels_for_reaction(reaction)\n\n expected_reactants = [\n Molecule().from_adjacency_list(\"\"\"\nmultiplicity 2\n1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 * C u1 p0 c0 {1,S} {6,S} {7,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n\"\"\"),\n Molecule().from_adjacency_list(\"\"\"\nmultiplicity 2\n1 * C u1 p0 c0 {2,S} {3,S} {4,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n\"\"\")]\n\n expected_products = [\n Molecule().from_adjacency_list(\"\"\"\n1 * C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 * C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p0 c0 {1,S} {9,S} {10,S} {11,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n10 H u0 p0 c0 {3,S}\n11 H u0 p0 c0 {3,S}\n\"\"\")]\n\n for i, reactant in enumerate(reaction.reactants):\n mapping = {}\n for label, atom in expected_reactants[i].get_all_labeled_atoms().items():\n mapping[atom] = reactant.molecule[0].get_labeled_atoms(label)[0]\n\n self.assertTrue(expected_reactants[i].is_isomorphic(reactant.molecule[0], mapping))\n\n for i, product in enumerate(reaction.products):\n # There are two identical labels in the product, so we need to check both mappings\n # Only one of the mappings will result in isomorphic structures though\n atoms_a = expected_products[i].get_labeled_atoms('*')\n atoms_b = product.molecule[0].get_labeled_atoms('*')\n mapping1 = {atoms_a[0]: atoms_b[0], atoms_a[1]: atoms_b[1]}\n mapping2 = {atoms_a[0]: atoms_b[1], atoms_a[1]: atoms_b[0]}\n\n results = [\n expected_products[i].is_isomorphic(product.molecule[0], mapping1),\n 
expected_products[i].is_isomorphic(product.molecule[0], mapping2)\n ]\n\n self.assertTrue(any(results))\n self.assertFalse(all(results))", "def multiplys(self, c_list):\n first = c_list[0]\n for i in range(1, len(c_list)):\n first.multiply(c_list[i])\n return first", "def square_list(original_list):\n return [multiplication(x, x) for x in original_list]", "def test_get_single_mapped_product_xyz(self):\n # Trivial unimolecular with an intentional mixed atom order: H2O <=> H2O\n h2o_xyz_1 = \"\"\"O -0.00032832 0.39781490 0.00000000\n H -0.76330345 -0.19953755 0.00000000\n H 0.76363177 -0.19827735 0.00000000\"\"\"\n r_1 = ARCSpecies(label='H2O', smiles='O', xyz=h2o_xyz_1)\n h2o_xyz_2 = \"\"\"H -0.76330345 -0.19953755 0.00000000\n H 0.76363177 -0.19827735 0.00000000\n O -0.00032832 0.39781490 0.00000000\"\"\"\n p_1 = ARCSpecies(label='H2O', smiles='O', xyz=h2o_xyz_2)\n rxn_1 = ARCReaction(reactants=['H2O'], products=['H2O'],\n r_species=[r_1], p_species=[p_1])\n mapped_product = rxn_1.get_single_mapped_product_xyz()\n self.assertEqual(rxn_1.atom_map, [2, 0, 1])\n self.assertTrue(check_atom_map(rxn_1))\n expected_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((-0.00032832, 0.3978149, 0.0), (-0.76330345, -0.19953755, 0.0),\n (0.76363177, -0.19827735, 0.0))}\n self.assertEqual(mapped_product.get_xyz(), expected_xyz)\n\n reactant_xyz = \"\"\"C -1.3087 0.0068 0.0318\n C 0.1715 -0.0344 0.0210\n N 0.9054 -0.9001 0.6395\n O 2.1683 -0.5483 0.3437\n N 2.1499 0.5449 -0.4631\n N 0.9613 0.8655 -0.6660\n H -1.6558 0.9505 0.4530\n H -1.6934 -0.0680 -0.9854\n H -1.6986 -0.8169 0.6255\"\"\"\n reactant = ARCSpecies(label='reactant', smiles='C([C]1=[N]O[N]=[N]1)', xyz=reactant_xyz)\n product_xyz = \"\"\"C -1.0108 -0.0114 -0.0610\n C 0.4780 0.0191 0.0139\n N 1.2974 -0.9930 0.4693\n O 0.6928 -1.9845 0.8337\n N 1.7456 1.9701 -0.6976\n N 1.1642 1.0763 -0.3716\n H -1.4020 0.9134 -0.4821\n H -1.3327 -0.8499 -0.6803\n H -1.4329 -0.1554 0.9349\"\"\"\n product = ARCSpecies(label='product', smiles='[N-]=[N+]=C(N=O)C', xyz=product_xyz)\n rxn_2 = ARCReaction(r_species=[reactant], p_species=[product])\n self.assertTrue(check_atom_map(rxn_2))\n mapped_product = rxn_2.get_single_mapped_product_xyz()\n self.assertEqual(rxn_2.atom_map[:6], [0, 1, 2, 3, 4, 5])\n self.assertIn(rxn_2.atom_map[6], [6, 8])\n self.assertIn(rxn_2.atom_map[7], [6, 7])\n self.assertIn(rxn_2.atom_map[8], [7, 8])\n expected_xyz = {'symbols': ('C', 'C', 'N', 'O', 'N', 'N', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 14, 16, 14, 14, 1, 1, 1),\n 'coords': ((-1.0108, -0.0114, -0.061), (0.478, 0.0191, 0.0139), (1.2974, -0.993, 0.4693),\n (0.6928, -1.9845, 0.8337), (1.7456, 1.9701, -0.6976), (1.1642, 1.0763, -0.3716),\n (-1.4329, -0.1554, 0.9349), (-1.402, 0.9134, -0.4821), (-1.3327, -0.8499, -0.6803))}\n self.assertEqual(mapped_product.get_xyz(), expected_xyz)\n\n reactant_xyz = \"\"\"C -1.3087 0.0068 0.0318\n C 0.1715 -0.0344 0.0210\n N 0.9054 -0.9001 0.6395\n O 2.1683 -0.5483 0.3437\n N 2.1499 0.5449 -0.4631\n N 0.9613 0.8655 -0.6660\n H -1.6558 0.9505 0.4530\n H -1.6934 -0.0680 -0.9854\n H -1.6986 -0.8169 0.6255\"\"\"\n reactant = ARCSpecies(label='reactant', smiles='C([C]1=[N]O[N]=[N]1)', xyz=reactant_xyz)\n product_xyz = \"\"\"C -1.0108 -0.0114 -0.0610\n C 0.4780 0.0191 0.0139\n N 1.2974 -0.9930 0.4693\n O 0.6928 -1.9845 0.8337\n N 1.7456 1.9701 -0.6976\n N 1.1642 1.0763 -0.3716\n H -1.4020 0.9134 -0.4821\n H -1.3327 -0.8499 -0.6803\n H -1.4329 -0.1554 0.9349\"\"\"\n product = ARCSpecies(label='product', smiles='[N-]=[N+]=C(N=O)C', 
xyz=product_xyz)\n rxn_2 = ARCReaction(r_species=[reactant], p_species=[product])\n self.assertTrue(check_atom_map(rxn_2))\n mapped_product = rxn_2.get_single_mapped_product_xyz()\n self.assertEqual(rxn_2.atom_map[:6], [0, 1, 2, 3, 4, 5])\n self.assertIn(rxn_2.atom_map[6], [6, 8])\n self.assertIn(rxn_2.atom_map[7], [6, 7])\n self.assertIn(rxn_2.atom_map[8], [7, 8])\n expected_xyz = {'symbols': ('C', 'C', 'N', 'O', 'N', 'N', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 14, 16, 14, 14, 1, 1, 1),\n 'coords': ((-1.0108, -0.0114, -0.061), (0.478, 0.0191, 0.0139), (1.2974, -0.993, 0.4693),\n (0.6928, -1.9845, 0.8337), (1.7456, 1.9701, -0.6976), (1.1642, 1.0763, -0.3716),\n (-1.4329, -0.1554, 0.9349), (-1.402, 0.9134, -0.4821), (-1.3327, -0.8499, -0.6803))}\n self.assertEqual(mapped_product.get_xyz(), expected_xyz)", "def get_products(self, *args, **kwargs):\n return _decomp.SOMcomponent_get_products(self, *args, **kwargs)", "def generateReactions(self, reactants, products=None, **options):\n reactionList = []\n reactionList.extend(self.generateReactionsFromLibraries(reactants, products, **options))\n reactionList.extend(self.generateReactionsFromFamilies(reactants, products, **options))\n return reactionList", "def mult_numbers(number_list):\n # Failed this one for the same reason, same line of thinking. Commenting out my \n # attempt so you can see it without breaking terminal.\n # product = []\n # for item in number_list:\n # number_list == item * item\n # product = number_list\n # return product", "def product(sequence):\n return _functools.reduce(_operator.mul, sequence)", "def prod(iterable):\n\treturn reduce(operator.mul, iterable) # replace with math.prod when 3.8 is standard", "def test_intra_ene_reaction(self):\n family = self.database.families['Intra_ene_reaction']\n reactants = [Molecule().from_adjacency_list(\"\"\"\n1 *1 C u0 p0 c0 {2,S} {3,S} {4,S} {10,S}\n2 *5 C u0 p0 c0 {1,S} {5,D} {6,S}\n3 *2 C u0 p0 c0 {1,S} {7,D} {11,S}\n4 C u0 p0 c0 {1,S} {8,D} {12,S}\n5 *4 C u0 p0 c0 {2,D} {7,S} {13,S}\n6 C u0 p0 c0 {2,S} {9,D} {15,S}\n7 *3 C u0 p0 c0 {3,D} {5,S} {14,S}\n8 C u0 p0 c0 {4,D} {9,S} {17,S}\n9 C u0 p0 c0 {6,D} {8,S} {16,S}\n10 *6 H u0 p0 c0 {1,S}\n11 H u0 p0 c0 {3,S}\n12 H u0 p0 c0 {4,S}\n13 H u0 p0 c0 {5,S}\n14 H u0 p0 c0 {7,S}\n15 H u0 p0 c0 {6,S}\n16 H u0 p0 c0 {9,S}\n17 H u0 p0 c0 {8,S}\n\"\"\")]\n expected_product = Molecule().from_adjacency_list(\"\"\"\n1 *2 C u0 p0 c0 {2,D} {3,S} {4,S} \n2 *3 C u0 p0 c0 {1,D} {5,S} {6,S}\n3 *1 C u0 p0 c0 {1,S} {7,S} {11,S} {10,S}\n4 C u0 p0 c0 {1,S} {8,D} {12,S}\n5 *4 C u0 p0 c0 {2,S} {7,D} {13,S}\n6 C u0 p0 c0 {2,S} {9,D} {15,S}\n7 *5 C u0 p0 c0 {3,S} {5,D} {14,S}\n8 C u0 p0 c0 {4,D} {9,S} {17,S}\n9 C u0 p0 c0 {6,D} {8,S} {16,S}\n10 *6 H u0 p0 c0 {3,S}\n11 H u0 p0 c0 {3,S}\n12 H u0 p0 c0 {4,S}\n13 H u0 p0 c0 {5,S}\n14 H u0 p0 c0 {7,S}\n15 H u0 p0 c0 {6,S}\n16 H u0 p0 c0 {9,S}\n17 H u0 p0 c0 {8,S}\n\"\"\")\n products = family.apply_recipe(reactants)\n\n self.assertEqual(len(products), 1)\n\n mapping = {}\n for label, atom in expected_product.get_all_labeled_atoms().items():\n mapping[atom] = products[0].get_labeled_atoms(label)[0]\n\n self.assertTrue(expected_product.is_isomorphic(products[0], mapping))", "def _cartesian_product_of_elements(self, elements):\n elements = tuple(elements)\n assert len(elements) == len(self._sets)\n return self.element_class(self, elements)", "def product_on_basis(self, monoid_el1, monoid_el2):\n monoid_el_product = monoid_el1 * monoid_el2\n return self._element_constructor_(monoid_el_product)", "def 
_prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def _join_product(*iterables):\n end_product = []\n for iterable in iterables:\n if len(iterable) > 1:\n end_product.extend(iterable)\n else:\n end_product.append(iterable[0])\n return tuple(end_product)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that applies a two-reactant, one-product reaction SMILES to two lists of input RDKit molecules, returning the products as a list of RDKit molecules.
def pair_rxnts(mol1_list, mol2_list, rxn, debug=False):
    prod_list = []
    for mol1 in mol1_list:
        for mol2 in mol2_list:
            products = rxn.RunReactants((Chem.AddHs(mol1), Chem.AddHs(mol2)))
            if debug:
                logging.info(products)
            if products != ():
                for prod in products:
                    if debug:
                        logging.info(MolToSmiles(prod[0]))
                    prod_list.append(prod[0])
    return prod_list
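A minimal usage sketch for pair_rxnts, assuming the function above is in scope; it is not part of the dataset record. The amide-coupling reaction SMARTS and the input SMILES are illustrative assumptions, and the raw molecules returned by RDKit's RunReactants typically still need sanitization before further use.

# Hypothetical usage of pair_rxnts; reaction SMARTS and inputs are example assumptions.
from rdkit import Chem
from rdkit.Chem import AllChem

# Example transformation: carboxylic acid + amine -> amide.
amide_rxn = AllChem.ReactionFromSmarts('[C:1](=[O:2])[OH].[N:3]>>[C:1](=[O:2])[N:3]')

acids = [Chem.MolFromSmiles('CC(=O)O')]   # acetic acid
amines = [Chem.MolFromSmiles('CCN')]      # ethylamine

raw_products = pair_rxnts(acids, amines, amide_rxn)
print(len(raw_products), 'raw product molecule(s)')  # run Chem.SanitizeMol on each before reuse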
[ "def _join_product(*iterables):\n end_product = []\n for iterable in iterables:\n if len(iterable) > 1:\n end_product.extend(iterable)\n else:\n end_product.append(iterable[0])\n return tuple(end_product)", "def simple_rxn(mol_list, rxn, debug=False): \n prod_list = []\n for mol in mol_list:\n if debug:\n logging.info('Input: '+ MolToSmiles(mol))\n products = rxn.RunReactants((Chem.AddHs(mol),))\n if debug:\n logging.info('Products: {}'.format(products))\n if products != ():\n for prod in products:\n if debug:\n logging.info(prod)\n logging.info(MolToSmiles(prod[0]))\n prod_list.append(prod[0])\n return prod_list", "def product(self, other, rename_vertices=True):\n if not rename_vertices:\n return [Simplex(x) for x in lattice_paths(self.tuple(), other.tuple())]\n\n answer = []\n for x in lattice_paths(self.tuple(), other.tuple()):\n new = tuple([\"L\" + str(v) + \"R\" + str(w) for (v, w) in x])\n answer.append(Simplex(new))\n return answer", "def get_cartesian_product(lists):\n return [i for i in itertools.product(*lists)]", "def _cartesian_product_of_elements(self, elements):\n elements = tuple(elements)\n assert len(elements) == len(self._sets)\n return self.element_class(self, elements)", "def product(*args):\n return map(''.join, itertools.product(*args))", "def product_on_basis(self, monoid_el1, monoid_el2):\n monoid_el_product = monoid_el1 * monoid_el2\n return self._element_constructor_(monoid_el_product)", "def array_product(list1, list2):\n \n lgth = len(list1)\n product_list = []\n \n for i in range(0, lgth):\n product_list.append(list1[i] * list2[i])\n \n return product_list", "def _irrep_product(self, other):\n return tuple(a*b for a, b in zip(self.irrep, other.irrep))", "def cartesian_product(a,b):\n return [(x,y) for x in a for y in b ]", "def sentence_combination(list_1, list_2):\n return [(term_1 + ' ' + term_2) for term_1 in list_1 for term_2 in list_2]", "def test_add_atom_labels_for_reaction_r_recombination(self):\n reactants = [Species().from_smiles('C[CH2]'), Species().from_smiles('[CH3]')]\n products = [Species().from_smiles('CCC')]\n\n reaction = TemplateReaction(reactants=reactants, products=products)\n\n self.database.kinetics.families['R_Recombination'].add_atom_labels_for_reaction(reaction)\n\n expected_reactants = [\n Molecule().from_adjacency_list(\"\"\"\nmultiplicity 2\n1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 * C u1 p0 c0 {1,S} {6,S} {7,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n\"\"\"),\n Molecule().from_adjacency_list(\"\"\"\nmultiplicity 2\n1 * C u1 p0 c0 {2,S} {3,S} {4,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n\"\"\")]\n\n expected_products = [\n Molecule().from_adjacency_list(\"\"\"\n1 * C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 * C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p0 c0 {1,S} {9,S} {10,S} {11,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n10 H u0 p0 c0 {3,S}\n11 H u0 p0 c0 {3,S}\n\"\"\")]\n\n for i, reactant in enumerate(reaction.reactants):\n mapping = {}\n for label, atom in expected_reactants[i].get_all_labeled_atoms().items():\n mapping[atom] = reactant.molecule[0].get_labeled_atoms(label)[0]\n\n self.assertTrue(expected_reactants[i].is_isomorphic(reactant.molecule[0], mapping))\n\n for i, product in enumerate(reaction.products):\n # There are two identical labels in the product, so we need to check both mappings\n # Only one of the mappings will result in isomorphic structures though\n 
atoms_a = expected_products[i].get_labeled_atoms('*')\n atoms_b = product.molecule[0].get_labeled_atoms('*')\n mapping1 = {atoms_a[0]: atoms_b[0], atoms_a[1]: atoms_b[1]}\n mapping2 = {atoms_a[0]: atoms_b[1], atoms_a[1]: atoms_b[0]}\n\n results = [\n expected_products[i].is_isomorphic(product.molecule[0], mapping1),\n expected_products[i].is_isomorphic(product.molecule[0], mapping2)\n ]\n\n self.assertTrue(any(results))\n self.assertFalse(all(results))", "def list_mul(list_a, list_b):\n return [a*b for a, b in zip(list_a, list_b)]", "def matmul(list1, list2):\n from symgp.superexpressions import SuperMatMul, SuperMatAdd\n \n \n # Handle multiplication by integers\n if isinstance(list1, int):\n return _mul_with_num(list1, list2)\n \n if isinstance(list2, int):\n return _mul_with_num(list2, list1)\n\n # Check sizes and reshape if necessary\n m1, n1, broadcast_list1 = _check_shape_matmul(list1, 'left')\n m2, n2, broadcast_list2 = _check_shape_matmul(list2, 'right')\n \n # Check shapes\n if n1 != m2:\n raise Exception(\"Shapes don't match: %s, %s\" % ((m1, n1), (m2, n2)))\n \n # Multiply based on types of lists\n if broadcast_list1 and broadcast_list2: # (1,n1) x (m2,1)\n out_list = [SuperMatAdd(*[SuperMatMul(list1[i],list2[i]).doit() for i in range(n1)]).doit()]\n elif broadcast_list1: # (1,n1) x (m2,n2)\n out_list = [0 for _ in range(n2)]\n for i in range(n2):\n out_list[i] = SuperMatAdd(*[SuperMatMul(list1[j],list2[j][i]).doit() for j in range(m2)]).doit()\n elif broadcast_list2: # (m1,n1) x (m2,1)\n out_list = [0 for _ in range(m1)]\n for i in range(m1):\n out_list[i] = SuperMatAdd(*[SuperMatMul(list1[i][j],list2[j]).doit() for j in range(m2)]).doit()\n else: # (m1,n1) x (m2,n2) \n out_list = [[0 for _ in range(n2)] for _ in range(m1)]\n for i in range(m1):\n for j in range(n2):\n out_list[i][j] = SuperMatAdd(*[SuperMatMul(list1[i][k],list2[k][j]).doit() for k in range(n1)]).doit()\n \n return out_list", "def _list_product(l):\n return reduce(lambda x, y: x*y, l, 1)", "def match_cross(lsts):\n return list(map(list, zip(*itertools.product(*lsts))))", "def product(fa: Iterable[A], fb: Iterable[B]) -> Iterable[Tuple[A, B]]:\n fbs = list(fb)\n\n # Because pylint does not allow `lambda a: fmap(lambda b: (a, b), fbs)`.\n def ff(a: A) -> Iterable[Tuple[A, B]]:\n return fmap(lambda b: (a, b), fbs)\n\n return flatmap(ff, fa)", "def _blend(coeffs,*args):\n if isinstance(args[0],Iterable):\n c = type(args[0])\n return c([_blend(coeffs,*args_i) for args_i in zip(*args)])\n else:\n return dotProduct(coeffs,args)", "def cartesian(lst1, lst2):\r\n list3 = []\r\n for i in range(len(lst1)):\r\n for j in range(len(lst2)):\r\n list3.append([lst1[i],lst2[j]]) #add in a loop each component\r\n #within lst1 to each component in lst2\r\n return list3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
replaces the external networks by xwards and equivalent impedance
def _replace_external_area_by_xwards(net_external, bus_lookups, xward_parameter_no_power,
                                     impedance_parameter, ext_buses_with_xward,
                                     show_computing_time, calc_volt_angles=True,
                                     runpp_fct=_runpp_except_voltage_angles):
    t_start = time.perf_counter()
    # --- drop all external elements
    e_buses_pd = bus_lookups["bus_lookup_pd"]["e_area_buses"]
    pp.drop_buses(net_external, e_buses_pd)
    drop_internal_branch_elements(net_external, bus_lookups["boundary_buses_inclusive_bswitch"])

    # --- drop shunt elements attached to boundary buses
    target_shunt_idx = net_external.shunt.index[net_external.shunt.bus.isin(
        bus_lookups["boundary_buses_inclusive_bswitch"])]
    net_external.shunt.drop(target_shunt_idx, inplace=True)

    # --- create impedance
    sn = net_external.sn_mva
    for idx in impedance_parameter.index:
        from_bus = impedance_parameter.from_bus[idx]
        to_bus = impedance_parameter.to_bus[idx]
        if abs(impedance_parameter.rft_pu[idx]) > 1e-8 or \
                abs(impedance_parameter.xft_pu[idx]) > 1e-8 or \
                abs(impedance_parameter.rtf_pu[idx]) > 1e-8 or \
                abs(impedance_parameter.xtf_pu[idx]) > 1e-8:
            pp.create_impedance(net_external, from_bus, to_bus,
                                impedance_parameter.rft_pu[idx],
                                impedance_parameter.xft_pu[idx],
                                sn_mva=net_external.sn_mva,
                                rtf_pu=impedance_parameter.rtf_pu[idx],
                                xtf_pu=impedance_parameter.xtf_pu[idx],
                                name="eq_impedance")
        else:
            pp.create_switch(net_external, from_bus, to_bus, "b", name="eq_switch")

    # --- create a xward
    for i in xward_parameter_no_power.index:
        target_bus = xward_parameter_no_power.bus_pd[i]
        pp.create_xward(net_external, target_bus,
                        0.0,  # np.nan_to_num(-xward_parameter.power_eq[i].real),
                        0.0,  # np.nan_to_num(-xward_parameter.power_eq[i].imag),
                        xward_parameter_no_power.shunt[i].real * sn / xward_parameter_no_power.vm_pu[i]**2,
                        0.0,
                        xward_parameter_no_power.r_ohm[i],
                        np.nan_to_num(xward_parameter_no_power.x_ohm[i]),  # neginf=1e100 is commented since this led to error
                        xward_parameter_no_power.vm_pu[i],
                        name="network_equivalent")

    eq_power = net_external.res_ext_grid.copy()
    eq_power["bus"] = net_external.ext_grid.bus.values
    eq_power["elm"] = "ext_grid"
    slack_gen = net_external.gen.index[net_external.gen.slack]
    if len(slack_gen) != 0:
        for i in slack_gen:
            new_eq_power = [net_external.res_gen.p_mw[i], net_external.res_gen.q_mvar[i],
                            net_external.gen.bus[i], "gen"]
            eq_power.loc[len(eq_power)] = new_eq_power
    assert len(eq_power.bus) == len(set(eq_power.bus))  # only one slack at individual bus

    runpp_fct(net_external, calculate_voltage_angles=calc_volt_angles,
              tolerance_mva=1e-6, max_iteration=100)
    eq_power.p_mw -= pd.concat([net_external.res_ext_grid.p_mw,
                                net_external.res_gen.p_mw[slack_gen]])
    eq_power.q_mvar -= pd.concat([net_external.res_ext_grid.q_mvar,
                                  net_external.res_gen.q_mvar[slack_gen]])
    for bus in eq_power.bus:
        net_external.xward.ps_mw[net_external.xward.bus == bus] = \
            eq_power.p_mw[eq_power.bus == bus].values
        net_external.xward.qs_mvar[net_external.xward.bus == bus] = \
            eq_power.q_mvar[eq_power.bus == bus].values

    net_external.poly_cost = net_external.poly_cost[0:0]
    net_external.pwl_cost = net_external.pwl_cost[0:0]

    if len(ext_buses_with_xward):
        pp.drop_buses(net_external,
                      net_external.bus.index.tolist()[-(len(ext_buses_with_xward)):])

    t_end = time.perf_counter()
    if show_computing_time:
        logger.info("\"replace_external_area_by_xwards\" finished in %s seconds:" %
                    round((t_end - t_start), 2))
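This helper expects pre-computed bus lookups and parameter frames, so it is rarely called directly. As a hedged illustration only, the sketch below shows the public pandapower grid_equivalents entry point that drives helpers of this kind; the example network and the boundary/internal bus choices are placeholders, and the keyword names should be checked against the installed pandapower version.

# Hedged sketch of the public entry point (bus ids are placeholders to adapt).
import pandapower.networks as pn
from pandapower.grid_equivalents import get_equivalent

net = pn.case9()                 # small example grid
boundary_buses = [4, 8]          # placeholder split between internal and external area
internal_buses = [0, 3]
net_eq = get_equivalent(net, eq_type="xward",
                        boundary_buses=boundary_buses,
                        internal_buses=internal_buses)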
[ "def annplastic(adaptation):\n # layers network\n inLayer = LinearLayer(100)\n hiddenLayer = LinearLayer(adaptation)\n outLayer = LinearLayer(10)\n # mount layers\n n.addInputModule(inLayer)\n n.addModule(hiddenLayer)\n n.addOutputModule(outLayer)\n # stabilise type confections\n in_to_hidden = FullConnection(inLayer, hiddenLayer)\n hidden_to_out = FullConnection(hiddenLayer, outLayer)\n # conecct the layers\n n.addConnection(in_to_hidden)\n n.addConnection(hidden_to_out)\n # start the module\n n.sortModules()", "def input_impedance(self): \n \n # DUT BRANCH\n Z_DUT = []\n Z_DUT.append(ZL_2_Zin(self.TLs[0].L, self.TLs[0].Zc, self.gammas[0], self.Z_short_DUT))\n Z_DUT.append(ZL_2_Zin(self.TLs[1].L, self.TLs[1].Zc, self.gammas[1], Z_DUT[0]))\n Z_DUT.append(ZL_2_Zin(self.TLs[2].L, self.TLs[2].Zc, self.gammas[2], Z_DUT[1]))\n Z_DUT.append(ZL_2_Zin(self.TLs[3].L, self.TLs[3].Zc, self.gammas[3], Z_DUT[2]))\n Z_DUT.append(ZL_2_Zin(self.TLs[4].L, self.TLs[4].Zc, self.gammas[4], Z_DUT[3]))\n \n # CEA BRANCH\n Z_CEA = []\n Z_CEA.append(ZL_2_Zin(self.TLs[8].L, self.TLs[8].Zc, self.gammas[8], self.Z_short_CEA))# 9\n Z_CEA.append(ZL_2_Zin(self.TLs[7].L, self.TLs[7].Zc, self.gammas[7], Z_CEA[0])) # 8\n Z_CEA.append(ZL_2_Zin(self.TLs[6].L, self.TLs[6].Zc, self.gammas[6], Z_CEA[1])) # 7\n Z_CEA.append(ZL_2_Zin(self.TLs[5].L, self.TLs[5].Zc, self.gammas[5], Z_CEA[2])) # 6\n \n # At T-junction. Impedance are associated in parallel.\n Zin = (Z_DUT[-1]*Z_CEA[-1])/(Z_DUT[-1] + Z_CEA[-1])\n \n return Zin, Z_CEA, Z_DUT", "def train_refinement_network():\n \n model_dict = {} # all the different models\n model_dict['UNet'] = UNet\n model_dict['UNetLite'] = UNetLite\n model_dict['UNetWide40'] = UNetWide40\n model_dict['UNetWide48'] = UNetWide48\n model_dict['UNetDS64'] = UNetDS64\n model_dict['UNetWide64'] = UNetWide64\n model_dict['MultiResUNet1D'] = MultiResUNet1D\n model_dict['MultiResUNetDS'] = MultiResUNetDS\n\n\n mdlName1 = 'UNetDS64' # approximation network\n mdlName2 = 'MultiResUNet1D' # refinement network\n \n length = 1024 # length of the signal\n\n # 10 fold cross validation\n for foldname in range(10):\n\n print('----------------')\n print('Training Fold {}'.format(foldname+1))\n print('----------------')\n # loading training data\n dt = pickle.load(open(os.path.join('data','train{}.p'.format(foldname)),'rb'))\n X_train = dt['X_train']\n Y_train = dt['Y_train']\n # loading validation data\n dt = pickle.load(open(os.path.join('data','val{}.p'.format(foldname)),'rb'))\n X_val = dt['X_val']\n Y_val = dt['Y_val']\n\n # loading metadata\n dt = pickle.load(open(os.path.join('data','meta{}.p'.format(foldname)),'rb'))\n max_ppg = dt['max_ppg']\n min_ppg = dt['min_ppg']\n max_abp = dt['max_abp']\n min_abp = dt['min_abp']\n\n\n Y_train = prepareLabel(Y_train) # prepare labels for training deep supervision\n \n Y_val = prepareLabel(Y_val) # prepare labels for training deep supervision\n \n \n mdl1 = model_dict[mdlName1](length) # load approximation network\n mdl1.load_weights(os.path.join('models','{}_model1_fold{}.h5'.format(mdlName1,foldname))) # load weights\n\n X_train = prepareDataDS(mdl1, X_train) # prepare training data for 2nd stage, considering deep supervision\n X_val = prepareDataDS(mdl1, X_val) # prepare validation data for 2nd stage, considering deep supervision\n\n mdl1 = None # garbage collection\n\n \n mdl2 = model_dict[mdlName2](length) # create refinement network\n\n # loss = mse\n mdl2.compile(loss='mean_squared_error',optimizer='adam',metrics=['mean_absolute_error'])\n\n checkpoint2_ = 
ModelCheckpoint(os.path.join('models','{}_model2_fold{}.h5'.format(mdlName2,foldname)), verbose=1, monitor='val_loss',save_best_only=True, mode='auto') \n\n # train refinement network for 100 epochs\n history2 = mdl2.fit(X_train,Y_train['out'],epochs=100,batch_size=192,validation_data=(X_val,Y_val['out']),callbacks=[checkpoint2_])\n\n pickle.dump(history2, open('History/{}_model2_fold{}.p'.format(mdlName2,foldname),'wb')) # save training history\n\n time.sleep(300) # pause execution for a while to free the gpu", "def update_ports(self):\n\n if self.to_i != None:\n self.from_e.ports[self.from_i - 1].networks = self.to_e.ports[self.to_i - 1].networks\n else:\n self.from_e.ports[self.from_i - 1].networks = [self.to_e]", "def process_dual_diagrams(self):\n ags_net=self.dic_attr['ags_net']\n form_orig_net=self.dic_attr['form_orig_net']\n force_orig_net=self.dic_attr['force_orig_net']\n map_edg_orig_dic=self.dic_attr['map_edg_orig_dic']\n q_c=self.dic_attr['q_c'] # force_densities, based on dic_attr['edg_dic'] indeces(indeces of original ags_net)\n edg_dic=self.dic_attr['edg_dic'] # the dictionary with original indeces\n\n # map the original edges to their forces\n old_edg_f_dic={} # {old_edg:f}\n for ind, edg in edg_dic.items():\n old_q=round(q_c[ind][0], 1)\n old_len=hf.edge_length(ags_net, edg)\n old_edg_f_dic[edg]=(old_q*old_len).item() # .item() to make it reabale in ironpytho (numpyfloat64>>float)\n \n # update the dual edge mapping (removing repetative vertices of force)\n map_edg_temp_dic=hf.update_dual_mapping_1(force_orig_net, map_edg_orig_dic)\n\n # update the dual edge mapping\n map_edg_dic, new_edg_f_dic=hf.update_dual_mapping_2(form_orig_net, map_edg_temp_dic, old_edg_f_dic)\n\n # make a new form_net (without aligned edges)\n form_net=hf.make_new_network(form_orig_net, list(map_edg_dic.keys()))\n\n # make a new dual (force) network without repetative egdes and vertices\n force_net=hf.make_new_network(force_orig_net, list(map_edg_dic.values()))\n\n # rotate force_net 90 degrees\n ANG=np.pi/2.0\n force_90_net=hf.rotate_dual(force_net , ANG)\n\n # dictionary of dual vertices\n dual_ver_dic={}\n for key in force_net.nodes():\n dual_ver_dic[key]=force_net.node_coordinates(key)\n\n # ### save the data to draw form and force diagrams in Rhino ###\n with open(os.path.join(BASEDIR, 'map_edg_dic.p'), 'wb') as fp:\n pickle.dump(map_edg_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'new_edg_f_dic.p'), 'wb') as fp:\n pickle.dump(new_edg_f_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'dual_ver_dic.p'), 'wb') as fp:\n pickle.dump(dual_ver_dic, fp, protocol=2) \n\n self.dic_attr['map_edg_dic']=map_edg_dic\n self.dic_attr['form_net']=form_net\n self.dic_attr['force_net']=force_net\n self.dic_attr['force_90_net']=force_90_net\n self.dic_attr['new_edg_f_dic']=new_edg_f_dic # {new_edg:f} ", "def ensemble_swap(ens1, ens2):\n\n if ens1.temp != ens2.temp:\n ens1.temp, ens2.temp = ens2.temp, ens1.temp\n if ens1.pext != ens2.pext:\n ens1.pext, ens2.pext = ens2.pext, ens1.pext\n if np.linalg.norm(ens1.stressext - ens2.stressext) > 1e-10:\n tmp = dstrip(ens1.stressext).copy()\n ens1.stressext[:] = ens2.stressext\n ens2.stressext[:] = tmp\n if len(ens1.bweights) != len(ens2.bweights):\n raise ValueError(\n \"Cannot exchange ensembles that have different numbers of bias components\"\n )\n if len(ens1.hweights) != len(ens2.hweights):\n raise ValueError(\n \"Cannot exchange ensembles that are described by different forces\"\n )\n if not np.array_equal(ens1.bweights, ens2.bweights):\n 
ens1.bweights, ens2.bweights = (\n dstrip(ens2.bweights).copy(),\n dstrip(ens1.bweights).copy(),\n )\n if not np.array_equal(ens1.hweights, ens2.hweights):\n ens1.hweights, ens2.hweights = (\n dstrip(ens2.hweights).copy(),\n dstrip(ens1.hweights).copy(),\n )", "def reset(self):\n \n# copy reference network \n self.net2 = copy.deepcopy(self.net1)\n\n # set all lines out of service\n self.net2.switch.closed = False\n \n self.cranked_isolated_sgen = False\n \n # scale sgens by determining random time of outage. \n t = random.randint(0, len(self.scaling_wind)-1)\n self.net2.sgen.scaling[self.net2.sgen[\"type\"]==\"wind\"] = self.scaling_wind.electricity.at[t]\n self.net2.sgen.scaling[self.net2.sgen[\"type\"]==\"solar\"] = self.scaling_pv.electricity.at[t]\n self.time = self.scaling_wind.time.at[t]\n \n # set storages at random SOC between .5 and 1 \n self.net2.storage.soc_percent = random.randint(5,10)/10\n \n pp_helpers.set_unsupplied_areas_out_of_service(self.net2)\n \n self.curr_step = -1\n self.curr_episode += 1\n self.action_episode_memory.append([])\n self.initial_obs.append([])\n self.is_net_restored = False\n self.restoration_failed = False\n self.info = {}\n \n return self._get_state()", "def train_approximate_network():\n \n model_dict = {} # all the different models\n model_dict['UNet'] = UNet\n model_dict['UNetLite'] = UNetLite\n model_dict['UNetWide40'] = UNetWide40\n model_dict['UNetWide48'] = UNetWide48\n model_dict['UNetDS64'] = UNetDS64\n model_dict['UNetWide64'] = UNetWide64\n model_dict['MultiResUNet1D'] = MultiResUNet1D\n model_dict['MultiResUNetDS'] = MultiResUNetDS\n\n\n mdlName1 = 'UNetDS64' # approximation network\n mdlName2 = 'MultiResUNet1D' # refinement network\n \n length = 1024 # length of the signal\n\n try: # create directory to save models\n os.makedirs('models')\n except:\n pass\n\n try: # create directory to save training history\n os.makedirs('History')\n except:\n pass\n\n # 10 fold cross validation\n for foldname in range(10):\n\n print('----------------')\n print('Training Fold {}'.format(foldname+1))\n print('----------------')\n # loading training data\n dt = pickle.load(open(os.path.join('data','train{}.p'.format(foldname)),'rb'))\n X_train = dt['X_train']\n Y_train = dt['Y_train']\n # loading validation data\n dt = pickle.load(open(os.path.join('data','val{}.p'.format(foldname)),'rb'))\n X_val = dt['X_val']\n Y_val = dt['Y_val']\n\n # loading metadata\n dt = pickle.load(open(os.path.join('data','meta{}.p'.format(foldname)),'rb'))\n max_ppg = dt['max_ppg']\n min_ppg = dt['min_ppg']\n max_abp = dt['max_abp']\n min_abp = dt['min_abp']\n\n\n Y_train = prepareLabel(Y_train) # prepare labels for training deep supervision\n \n Y_val = prepareLabel(Y_val) # prepare labels for training deep supervision\n \n\n \n mdl1 = model_dict[mdlName1](length) # create approximation network\n\n # loss = mae, with deep supervision weights\n mdl1.compile(loss='mean_absolute_error',optimizer='adam',metrics=['mean_squared_error'], loss_weights=[1., 0.9, 0.8, 0.7, 0.6]) \n\n\n checkpoint1_ = ModelCheckpoint(os.path.join('models','{}_model1_fold{}.h5'.format(mdlName1,foldname)), verbose=1, monitor='val_out_loss',save_best_only=True, mode='auto') \n # train approximation network for 100 epochs\n history1 = mdl1.fit(X_train,{'out': Y_train['out'], 'level1': Y_train['level1'], 'level2':Y_train['level2'], 'level3':Y_train['level3'] , 'level4':Y_train['level4']},epochs=100,batch_size=256,validation_data=(X_val,{'out': Y_val['out'], 'level1': Y_val['level1'], 'level2':Y_val['level2'], 
'level3':Y_val['level3'] , 'level4':Y_val['level4']}),callbacks=[checkpoint1_],verbose=1)\n\n pickle.dump(history1, open('History/{}_model1_fold{}.p'.format(mdlName1,foldname),'wb')) # save training history\n\n\n mdl1 = None # garbage collection\n\n time.sleep(300) # pause execution for a while to free the gpu", "def impedance(address, name):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.measure_imp()", "def test_remap_nonlocal_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_nonlocal_quantum_error(error1, ['u3'], [0], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [1, 2], [3, 0], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_nonlocal_quantum_error(error1, ['u3'], [1], [2], False)\n target.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n self.assertEqual(remapped_model, target)", "def betweenness(infile, recalculate = False):\n\n #seperate network into sub\n g = networkx.read_gml(infile,label='id')\n fungigraph = g.subgraph([n for n,attrdict in g.node.items() if attrdict ['group']=='fungi'])\n bactigraph = g.subgraph([n for n,attrdict in g.node.items() if attrdict ['group'] == 'Bact' ] )\n lipidgraph = g.subgraph([n for n,attrdict in g.node.items() if attrdict ['group'] == 'lipid' ] )\n ##\n \n #remove node\n def rem_betw(subgraph,g):\n m = networkx.betweenness_centrality(subgraph)\n l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)\n x = []\n y = []\n largest_component = max(networkx.connected_components(g), key = len)\n n = len(subgraph.nodes())\n x.append(0)\n y.append(len(largest_component) * 1. / n)\n R = 0.0\n for i in range(1, n):\n g.remove_node(l.pop(0)[0])\n if recalculate:#True, then restore all nodes. Therefore, False\n m = networkx.betweenness_centrality(g)\n l = sorted(m.items(), key = operator.itemgetter(1), \n reverse = True)\n largest_component = max(networkx.connected_components(g), key = len)\n #print(len(g.nodes()))\n x.append(i * 1. / n)\n R += len(largest_component) * 1. / n\n y.append(len(largest_component) * 1. 
/ n)\n return x, y, 0.5 - R / n\n d = {\"fungibet\" : rem_betw(fungigraph,g),\n \"bactibet\": rem_betw(bactigraph,g),\n \"lipidbet\": rem_betw(lipidgraph,g)}\n return(pandas.DataFrame.from_dict(d,orient='index').transpose())", "def _convert_shapes2(self, design):\n for _nn in design.nets:\n _web = None\n if 'type' in _nn.attributes:\n if 'bus' == _nn.attributes['type']:\n _width = 0.762\n _web = Eagle.Bus(name=_nn.net_id)\n self.shapeheader.buses.append(_web)\n else:\n _clrs = []\n _attrre = re.compile(r'^netclearance(\\d+)$')\n for _aa in _nn.attributes:\n _attr = _attrre.match(_aa)\n if None != _attr:\n _clrs.append((_attr.group(1), _nn.attributes[_aa]))\n\n self.netclasses.append(Eagle.NetClass( # duplicates are cleared below\n num=_nn.attributes['netclass'], \n name=_nn.attributes['netname'], \n width=_nn.attributes['netwidth'],\n drill=_nn.attributes['netdrill'],\n clearances=_clrs,\n ))\n _width = 0.1524 # _nn.attributes['netwidth']\n _web = Eagle.Net(name=_nn.net_id, \n nclass=_nn.attributes['netclass'])\n self.shapeheader.nets.append(_web)\n else:\n _width = 0.1524\n _web = Eagle.Net(name=_nn.net_id, nclass=0)\n self.shapeheader.nets.append(_web)\n\n _prpts = set() # processed points\n for _pp in _nn.points:\n _pt = _nn.points[_pp]\n for _opp in _pt.connected_points:\n if not _opp in _prpts: # not yet processed\n _opt = None\n try:\n _opt = _nn.points[_opp]\n except KeyError: # not from current net\n for _xxn in design.nets:\n if _opp in _xxn.points:\n _opt = _xxn.points[_opp]\n break\n else:\n raise ValueError(\"undefined point ID: %s\" % str(_opp))\n\n _web.shapes.append(Eagle.Wire(\n x1=_pt.x, y1=_pt.y,\n x2=_opt.x,\n y2=_opt.y,\n style=\"Continuous\", layer=91, width=_width))\n\n _prpts.add(_pp)\n letter_pin_numbers = []\n for _rr in _pt.connected_components:\n _pno = -1\n for _in, _ii in enumerate(self.shapeheader.parts):\n if _rr.instance_id == _ii.name:\n _pno = 1 + _in\n break\n try:\n pin_number = int(_rr.pin_number)\n except ValueError:\n if letter_pin_numbers:\n pin_number = letter_pin_numbers.pop() + 1\n else: \n pin_number = 1\n letter_pin_numbers.append(pin_number)\n\n _web.shapes.append(Eagle.PinRef(\n partno= _pno, gateno=1, \n pinno=pin_number,\n ))\n return", "def test_resnet(self):\n RepurposerTestUtils.download_resnet()\n mod = mx.mod.Module.load('resnet-101', 0)\n mh = model_handler.ModelHandler(mod)\n old_layer_names = mh.layer_names\n\n mh.drop_layer_top()\n mh.drop_layer_bottom()\n\n assert sorted(list(set(old_layer_names).difference(set(mh.layer_names)))) == sorted(['bn_data', 'softmax'])", "def backpropagate(self, expected):\n\n #Assigns delta values to each node in the output layer and calculates momentum\n for i in range(len(self.output_layer)):\n node = self.output_layer[i]\n node.delta_weight = expected[i] - node.output\n\n #Backpropagates errors through hidden layers\n for i in reversed(range(len(self.NN[:-1]))):\n layer = self.NN[i]\n #Iterates through each node in a layer\n for j in range(len(layer)):\n error = 0\n cur_node = layer[j]\n #Iterates through each node in the next layer up\n for node in self.NN[i+1]:\n error += node.weights[j] * node.delta_weight\n\n cur_node.delta_weight = error * cur_node.derivative()", "def update_target_model(self):\n self.target_network.set_weights(self.q_network.get_weights())\n # vedere se funziona invece questo\n #for t, e in zip(self.target_network.trainable_variables,\n # self.primary_network.trainable_variables): t.assign(t * (1 - TAU) + e * TAU)", "def refresh(self):\n # We check for the current networks in 
range.\n visible_networks_levels = self.check_networks()\n # For each network present in the \"networks\" attribute:\n for network in self.networks.keys():\n # if it is not visible,\n if not network in visible_networks_levels.keys():\n # its frequency is decremented by 1 and its signal level is set to None,\n self.networks[network][\"rate\"] -= 1\n self.networks[network][\"level\"] = None\n # and if its frequency is now equal to 0, the audio announcement is made and the network is removed from the \"networks\" attribute.\n if self.networks[network][\"rate\"] == 0:\n audio.say(u\"%s out of range\" % network)\n del self.networks[network]\n # For each visible network:\n for network in visible_networks_levels.keys():\n # if the network is not yet in the \"networks\" attribute, the audio announcement is made, and it is added to the \"networks\" attribute with the frequency 0 and an arbitrary signal level (i.e. \"None\");\n if not network in self.networks.keys():\n audio.say(u\"%s now in range\" % network)\n self.networks[network] = {\"rate\" : 0, \"level\" : None}\n # in any case, the frequency associated to the network is incremented by 1 and its signal level is updated.\n self.networks[network][\"rate\"] += 1\n self.networks[network][\"level\"] = visible_networks_levels[network]", "def random_network_updating(self):\n\n #update fire states with interface vector\n for i in range(self.interface_dim):\n self.network_state[i]=self.network_state[i]+self.interface[0,0,i]\n \n for i in range(self.netsize):\n s=np.dot(self.network_state,self.randomnetwork[i])*self.network_forget_rate\n if s>0:\n self.network_state[i]=s\n else:\n self.network_state[i]=0\n \n #changing size\n temp=np.zeros(self.interface_dim)\n for i in range(self.interface_dim):\n temp[i]=self.network_state[i]\n \n self.interface=temp\n self.interface=tf.convert_to_tensor(self.interface.reshape(1,self.interface_dim))", "def update_prev_q_network(self):\n self.params_prev_q_network = jax.tree_map(lambda x: x.copy(),\n self.params_q_network)", "def test_replace(self):\n\n intf_pfxs = [[], [], [], []]\n\n # add prefixes to each of the interfaces\n for i in range(len(self.pg_interfaces)):\n intf = self.pg_interfaces[i]\n\n # 2001:16:x::1/64\n addr = \"2001:16:%d::1\" % intf.sw_if_index\n a = VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config()\n intf_pfxs[i].append(a)\n\n # 2001:16:x::2/64 - a different address in the same subnet as above\n addr = \"2001:16:%d::2\" % intf.sw_if_index\n a = VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config()\n intf_pfxs[i].append(a)\n\n # 2001:15:x::2/64 - a different address and subnet\n addr = \"2001:15:%d::2\" % intf.sw_if_index\n a = VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config()\n intf_pfxs[i].append(a)\n\n # a dump should n_address in it\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 3)\n\n #\n # remove all the address thru a replace\n #\n self.vapi.sw_interface_address_replace_begin()\n self.vapi.sw_interface_address_replace_end()\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 0)\n\n #\n # add all the interface addresses back\n #\n for p in intf_pfxs:\n for v in p:\n v.add_vpp_config()\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 3)\n\n #\n # replace again, but this time update/re-add the address on the first\n # two interfaces\n #\n self.vapi.sw_interface_address_replace_begin()\n\n for p in intf_pfxs[:2]:\n for v in p:\n v.add_vpp_config()\n\n 
self.vapi.sw_interface_address_replace_end()\n\n # on the first two the address still exist,\n # on the other two they do not\n for intf in self.pg_interfaces[:2]:\n self.assertEqual(self.get_n_pfxs(intf), 3)\n for p in intf_pfxs[:2]:\n for v in p:\n self.assertTrue(v.query_vpp_config())\n for intf in self.pg_interfaces[2:]:\n self.assertEqual(self.get_n_pfxs(intf), 0)\n\n #\n # add all the interface addresses back on the last two\n #\n for p in intf_pfxs[2:]:\n for v in p:\n v.add_vpp_config()\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 3)\n\n #\n # replace again, this time add different prefixes on all the interfaces\n #\n self.vapi.sw_interface_address_replace_begin()\n\n pfxs = []\n for intf in self.pg_interfaces:\n # 2001:18:x::1/64\n addr = \"2001:18:%d::1\" % intf.sw_if_index\n pfxs.append(VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config())\n\n self.vapi.sw_interface_address_replace_end()\n\n # only .18 should exist on each interface\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 1)\n for pfx in pfxs:\n self.assertTrue(pfx.query_vpp_config())\n\n #\n # remove everything\n #\n self.vapi.sw_interface_address_replace_begin()\n self.vapi.sw_interface_address_replace_end()\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 0)\n\n #\n # add prefixes to each interface. post-begin add the prefix from\n # interface X onto interface Y. this would normally be an error\n # since it would generate a 'duplicate address' warning. but in\n # this case, since what is newly downloaded is sane, it's ok\n #\n for intf in self.pg_interfaces:\n # 2001:18:x::1/64\n addr = \"2001:18:%d::1\" % intf.sw_if_index\n VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config()\n\n self.vapi.sw_interface_address_replace_begin()\n\n pfxs = []\n for intf in self.pg_interfaces:\n # 2001:18:x::1/64\n addr = \"2001:18:%d::1\" % (intf.sw_if_index + 1)\n pfxs.append(VppIpInterfaceAddress(self, intf, addr, 64).add_vpp_config())\n\n self.vapi.sw_interface_address_replace_end()\n\n self.logger.info(self.vapi.cli(\"sh int addr\"))\n\n for intf in self.pg_interfaces:\n self.assertEqual(self.get_n_pfxs(intf), 1)\n for pfx in pfxs:\n self.assertTrue(pfx.query_vpp_config())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies the coder to the text. Returns the encoded text.
def applyCoder(text, coder):
    newtext = ""
    for i in range(len(text)):
        if text[i].isalpha():
            newtext += coder[text[i]]
        else:
            newtext += text[i]
    return newtext
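A small usage sketch for applyCoder, assuming the function above is in scope; the build_shift_coder helper is introduced here only for illustration and produces the letter-to-letter mapping that applyCoder expects.

# Illustrative helper: build a Caesar-style substitution dict for applyCoder.
import string

def build_shift_coder(shift):
    coder = {}
    for letters in (string.ascii_lowercase, string.ascii_uppercase):
        for i, ch in enumerate(letters):
            coder[ch] = letters[(i + shift) % 26]
    return coder

print(applyCoder("Hello, world!", build_shift_coder(3)))  # -> Khoor, zruog!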
[ "def apply_coder(text, coder):\n ### TODO.\n codedText = ''\n for char in text:\n if char in coder:\n codedText += coder[char]\n else:\n codedText += char\n return codedText", "def encode_text(self, text):\n encoded_text = \"\"\n for char in text:\n encoded_text += self.codes[char]\n return encoded_text", "def getEncodedText(self, text: str) -> str:\n res = \"\"\n for char in text:\n res += self.codes[char]\n\n return res", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. do it here so one can see them.\n #\n # first the braces.\n if 'encode_re_braces' not in self.__dict__:\n self.encode_re_braces = self.re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}', text)\n if 'encode_re_bslash' not in self.__dict__:\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = self.re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n if not (self.literal_block or self.literal or self.mathmode):\n # the vertical bar: in mathmode |,\\vert or \\mid\n # in textmode \\textbar\n text = text.replace(\"|\", '{\\\\textbar}')\n text = text.replace(\"<\", '{\\\\textless}')\n text = text.replace(\">\", '{\\\\textgreater}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. hat{~} would also work.\n # text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"^\", '{\\\\textasciicircum}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\textasciitilde}')\n # Separate compound characters, e.g. \"--\" to \"-{}-\". (The\n # actual separation is done later; see below.)\n separate_chars = '-'\n if self.literal_block or self.literal:\n # In monospace-font, we also separate \",,\", \"``\" and \"''\"\n # and some other characters which can't occur in\n # non-literal text.\n separate_chars += ',`\\'\"<>'\n # pdflatex does not produce doublequotes for ngerman.\n text = self.babel.double_quotes_in_tt(text)\n if self.font_encoding == 'OT1':\n # We're using OT1 font-encoding and have to replace\n # underscore by underlined blank, because this has\n # correct width.\n text = text.replace('_', '{\\\\underline{ }}')\n # And the tt-backslash doesn't work in OT1, so we use\n # a mirrored slash.\n text = text.replace('\\\\textbackslash', '\\\\reflectbox{/}')\n else:\n text = text.replace('_', '{\\\\_}')\n else:\n text = self.babel.quote_quotes(text)\n text = text.replace(\"_\", '{\\\\_}')\n for char in separate_chars * 2:\n # Do it twice (\"* 2\") becaues otherwise we would replace\n # \"---\" by \"-{}--\".\n text = text.replace(char + char, char + '{}' + char)\n if self.insert_newline or self.literal_block:\n # Insert a blank before the newline, to avoid\n # ! 
LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n if self.literal_block:\n closings = \"}\" * len(self.literal_block_stack)\n openings = \"\".join(self.literal_block_stack)\n else:\n closings = \"\"\n openings = \"\"\n text = text.replace(\n \"\\n\", \"%s}\\\\\\\\\\n\\\\mbox{%s\" % (closings, openings))\n # lines starting with \"[\" give errors.\n text = text.replace('[', '{[}')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n if self.latex_encoding != 'utf8':\n text = self.unicode_to_latex(text)\n return text", "def caesar_encoder(text, shift):\n\n text = text.lower()\n encoded_str = ''\n index = eng_dict[shift]\n for char in text:\n if char == ' ':\n encoded_str += ' '\n else:\n orig_i = eng_dict[char]\n new_char = list(eng_dict.keys())[list(eng_dict.values()).index((orig_i + index) % 26)]\n encoded_str += new_char\n return encoded_str", "def the_work(text):\n\n new_text = \"\" # string holding text with changes made to it\n has_code = False # has ` without a closing `\n\n for word in text:\n\n if has_code == True:\n # if there's already been a code indicator\n if word == \"`\":\n new_text = new_text + \"</code>\"\n has_code = False\n else:\n # default\n # add the word to the new text string\n new_text = new_text + word\n\n else:\n # default\n if word == \"`\":\n # if there's a code indicator begin <code>\n # indicate that there needs to be a </code>\n new_text = new_text + \"<code>\"\n has_code = True\n else:\n # default\n # add the word to the new text string\n new_text = new_text + word\n\n return new_text", "def get_coded_string(self):\n if not self._coded_string:\n self._coded_string = self._encode_string(self._input_string) \n return self._coded_string", "def cencode(text):\n return _encode(text)[0]", "def coding_text(self, coding_text):\n\n self._coding_text = coding_text", "def compress(self, text: str) -> str:\n self.leafs = HuffmanTree.sort_leafs(HuffmanTree.create_leafs(text))\n nodes: [Node] = list(self.leafs)\n if not nodes:\n return \"\"\n if len(nodes) == 1:\n nodes = [InternalNode(1, nodes[0])]\n return self.__compress(text, nodes)", "def _decode_text(reverse_mapping, encoded_text):\n current = \"\"\n decoded_text = \"\"\n for bit in encoded_text:\n current += bit\n if current in reverse_mapping:\n character = reverse_mapping[current]\n decoded_text += character\n current = \"\"\n return decoded_text", "def prepare_for_tokenization(self, text, **kwargs):\n return text", "def code(self, elem, theme, inline=True):\n if inline:\n return theme.inline.render(elem.text)\n else:\n text = elem.text\n language = None\n\n if 'class' in elem.attrib:\n language = elem.attrib['class']\n\n if code_has_language(text):\n text, language = code_parse(text)\n\n style = self.style if theme.block.highlight else None\n\n return highlight_code(text, language, style,\n self.format).split('\\n')", "def encodeString():\n pass", "def forward_text_encoder(self, texts, dialogue_history=False, batchsize=None):\n texts_encoded = None\n if texts is None or (dialogue_history and not self.encode_dialogue_history):\n if (\n self.multimodal\n and self.multimodal_combo == \"concat\"\n and dialogue_history\n ):\n texts_encoded = torch.stack(\n [self.blank_encoding for _ in range(batchsize)]\n )\n else:\n encoder = self.context_encoder if dialogue_history else self.label_encoder\n indexes, mask = self.captions_to_tensor(texts)\n texts_encoded = encoder(indexes)\n if self.text_encoder_frozen:\n texts_encoded = 
texts_encoded.detach()\n texts_encoded = self.additional_layer(texts_encoded)\n\n return texts_encoded", "def render_text(self, indent: str = \" \") -> str:\n self.preprocess()\n return f\"{self._start()}{self._mid(indent)}{self._end()}\"", "def process_source_text(self, source_text):\n return source_text", "def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text", "def encode(self, texts):\n bert_tokens = []\n bert_masks = []\n bert_segments = []\n\n for text in texts:\n # Split text to array of tokens (words)\n text = self.tokenizer.tokenize(text)\n \n converter_input = ['[CLS]'] + text + ['[SEP]']\n\n tokens = self.tokenizer.convert_tokens_to_ids(converter_input)\n\n bert_tokens.append(tokens)\n\n ## Padding\n # Find the length of the longer sentence of tokens\n max_len = 0\n for i in bert_tokens:\n if len(i) > max_len:\n max_len = len(i)\n\n # Add padding to the end of each sentence of tokens. As a result we will \n # have equal length sentences of tokens and rhen transform them to numpy \n # array.\n padded_tokens = np.array([i+[0]*(max_len-len(i)) for i in bert_tokens])\n ## Masking\n # Zero(0) means ignore.\n bert_masks = np.where(padded_tokens != 0, 1, 0)\n ## Segments\n bert_segments = np.where(padded_tokens != 0, 0, 0)\n\n return padded_tokens, bert_masks, bert_segments" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if basis is insertion encodable by rightmost.
def is_insertion_encodable_rightmost(basis: Iterable[Perm]) -> bool:
    curr = 0
    for perm in basis:
        curr = curr | InsertionEncodablePerms._insertion_encodable_properties(perm)
        if curr == InsertionEncodablePerms._ALL_PROPERTIES:
            return True
    return False
[ "def is_insertion_encodable(basis: Iterable[Perm]) -> bool:\n return InsertionEncodablePerms.is_insertion_encodable_rightmost(\n basis\n ) or InsertionEncodablePerms.is_insertion_encodable_maximum(basis)", "def is_insertion_encodable_maximum(basis: Iterable[Perm]) -> bool:\n curr = 0\n for perm in basis:\n curr = curr | InsertionEncodablePerms._insertion_encodable_properties(\n perm.rotate()\n )\n if curr == InsertionEncodablePerms._ALL_PROPERTIES:\n return True\n return False", "def is_insertion(ival):\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins", "def is_inserted(self):\n return self.code < -200", "def is_last_position(self):\n if self.read_offset == len(self.underlying_read.seq) - 1:\n return True\n if self.underlying_read.cigar[-1][1] == _CIG_INSERTION:\n # read ends with insertion (RARE)\n if self._cig_offset == len(self.underlying_read.cigar) - 2:\n # at the 2nd-to-last cigar element\n return self._cig_elem_offset == self._cigarelement()[1] - 1 # last base\n return False", "def isLast(entity):", "def _is_after_insertion_at_reg_begin(self, delta):\n sel = self.view.sel()\n if len(sel) != 1:\n return False\n\n [sel] = sel\n if not sel.empty():\n return False\n\n pt = sel.a\n reg = self.get_edit_region()\n \n if pt == reg.a:\n return True\n\n if delta == 2 and pt == reg.a - 1 and\\\n self.view.substr(pt) in CLOSING_AUTOINSERT_CHARS:\n return True\n\n return False", "def orderOfInsertions(self, resSeq):\n firstAtom = None\n for atom in self.atoms.values():\n if atom.resSeq == resSeq and atom.iCode != '' and firstAtom is None:\n firstAtom = atom\n continue\n if firstAtom is not None and atom.resSeq == resSeq and atom.iCode !='' and firstAtom.iCode != atom.iCode:\n return firstAtom.iCode < atom.iCode\n else:\n firstAtom = None\n \n return True", "def _is_after_insertion_at_reg_end(self, delta):\n sel = self.view.sel()\n if len(sel) != 1:\n return False\n\n [sel] = sel\n if not sel.empty():\n return False\n\n pt = sel.a\n reg = self.get_edit_region()\n\n if pt == reg.b + delta:\n return True\n\n if delta == 2 and pt == reg.b + 1 and\\\n self.view.substr(pt) in CLOSING_AUTOINSERT_CHARS:\n return True\n\n return False", "def isLast(obj, namespace):", "def can_be_merged(prev, cur):\n\n WHITESPACE = (' ', '\\t')\n if not cur.mergeable or not prev.mergeable:\n return False\n elif cur.offset != (prev.offset + prev.length):\n return False\n elif cur.text in WHITESPACE and not prev.text in WHITESPACE:\n return False\n elif prev.text in WHITESPACE and not cur.text in WHITESPACE:\n return False\n return True", "def check_db_entry(self):\n raise NotImplementedError", "def is_rightmost(self) -> bool:\n if self.parent is None: return True\n return self.parent.children[-1] is self", "def valid_cursor_position(self):\n buffer = self.view.buffer\n insert = buffer.get_insert()\n insert_iter = buffer.get_iter_at_mark(insert)\n begin, end = self.stack[-1].bounds\n begin_iter = buffer.get_iter_at_mark(begin)\n end_iter = buffer.get_iter_at_mark(end)\n \n return insert_iter.in_range(begin_iter, end_iter)", "def isLast(self):\n pass", "def is_strict(self):\n for row in self:\n if any(row[i] == row[i+1] for i in range(len(row)-1)):\n return False\n return True", "def brc_checker(insert):\n\n # insert is None for terminal inserts.\n if insert is None:\n return False\n\n # return false if insert is not a pin / insert\n if insert.insert_type not in [\"Pin\", \"Offset\"]:\n return False\n\n # The brc name (as described in the fixture file (and wirelist)\n # is stored in fix_id.\n fix_id = 
insert.fix_id\n\n # a fix_id that is a string means there was a lookup error.\n if isinstance(fix_id, str):\n return False\n\n return fix_id.brc == token", "def _isEndOfRow(self):\r\n\t\tinfo=self.copy()\r\n\t\tinfo.expand(textInfos.UNIT_CHARACTER)\r\n\t\treturn info._rangeObj.getText(-1)==u'\\u0007'", "def is_identity(self):\n\t\tbasis = Generators.standard_basis(self.signature)\n\t\treturn self.image_of_set(basis) == basis" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if basis is insertion encodable by maximum.
def is_insertion_encodable_maximum(basis: Iterable[Perm]) -> bool:
    curr = 0
    for perm in basis:
        curr = curr | InsertionEncodablePerms._insertion_encodable_properties(
            perm.rotate()
        )
        if curr == InsertionEncodablePerms._ALL_PROPERTIES:
            return True
    return False
[ "def is_insertion_encodable(basis: Iterable[Perm]) -> bool:\n return InsertionEncodablePerms.is_insertion_encodable_rightmost(\n basis\n ) or InsertionEncodablePerms.is_insertion_encodable_maximum(basis)", "def is_insertion_encodable_rightmost(basis: Iterable[Perm]) -> bool:\n curr = 0\n for perm in basis:\n curr = curr | InsertionEncodablePerms._insertion_encodable_properties(perm)\n if curr == InsertionEncodablePerms._ALL_PROPERTIES:\n return True\n return False", "def is_max(self):\n return 'wintype' in self and self['wintype'] == 'max'", "def isLast(entity):", "def hasMax(*args, **kwargs):\n \n pass", "def is_plenty(self) -> bool:\n constraint = gen_constraint(self.order)[self.type]\n if constraint['min_keys'] < self.get_key_size() <= constraint['max_keys']:\n # not sure if the max key limit should be enforced. trying to redistribute an overflow node?\n return True\n else:\n return False", "def is_inserted(self):\n return self.code < -200", "def is_insertion(ival):\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins", "def _get_hasMaximumValue(self) -> \"bool\" :\n return _core.DistanceValueCommandInput__get_hasMaximumValue(self)", "def test_max_entries_sign(self):\r\n # One arg.\r\n self.assertEquals(max_entries(1).sign, u.Sign.POSITIVE_KEY)\r\n self.assertEquals(max_entries(-2).sign, u.Sign.NEGATIVE_KEY)\r\n self.assertEquals(max_entries(Variable()).sign, u.Sign.UNKNOWN_KEY)\r\n self.assertEquals(max_entries(0).sign, u.Sign.ZERO_KEY)", "def test_result_has_max_requested_or_less(self):\n pass", "def is_last_position(self):\n if self.read_offset == len(self.underlying_read.seq) - 1:\n return True\n if self.underlying_read.cigar[-1][1] == _CIG_INSERTION:\n # read ends with insertion (RARE)\n if self._cig_offset == len(self.underlying_read.cigar) - 2:\n # at the 2nd-to-last cigar element\n return self._cig_elem_offset == self._cigarelement()[1] - 1 # last base\n return False", "def isMaxHeap(self):\r\n for i in range(1,int(self.size()/2)+1):\r\n if self[i] < self[2*i]:\r\n return False\r\n if 2*i + 1 < self.size():\r\n if self[i] < self[2*i + 1]:\r\n return False\r\n return True", "def _is_highest_root(self)->bool: # TEST\n return self.findPosition(-1) == 1", "def test_transMaxByAdd(self):\n self._setupTrans()\n \n # Confirm we reached the max\n self._confirmTotal(True)\n \n # Confirm we can't add anything else - assume Generic Item is $0.01\n self.log.info(\"Adding $0.01 cent item to go over limit\")\n pos.click_speed_key(\"Generic Item\")\n self._confirmMessage()\n \n self._clearTrans()", "def isLast(self):\n pass", "def is_maximal(self):\n return self.is_prime()", "def is_last_iter(self, trainer):\n return trainer.iter + 1 == trainer.max_iters", "def test_consider_max_significance(self):\n translation = TranslatedRevisionFactory(document__locale=\"de\", is_approved=True)\n ApprovedRevisionFactory(\n document=translation.document.parent,\n is_ready_for_localization=False, # should still count\n significance=MAJOR_SIGNIFICANCE,\n )\n ApprovedRevisionFactory(\n document=translation.document.parent,\n is_ready_for_localization=True,\n significance=MEDIUM_SIGNIFICANCE,\n )\n row = self.row()\n self.assertEqual(row[\"title\"], translation.document.title)\n self.assertEqual(str(row[\"status\"]), \"Immediate Update Needed\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if basis is insertion encodable.
def is_insertion_encodable(basis: Iterable[Perm]) -> bool:
    return InsertionEncodablePerms.is_insertion_encodable_rightmost(
        basis
    ) or InsertionEncodablePerms.is_insertion_encodable_maximum(basis)
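A hedged usage sketch covering the three insertion-encodability checks in the records above, assuming the full InsertionEncodablePerms class (including its private helpers) is in scope. Perm is taken from the permuta package, and the basis permutations below are arbitrary examples.

# Illustrative call only; the basis is an arbitrary assumption.
from permuta import Perm

basis = [Perm((2, 0, 1)), Perm((1, 2, 0))]
print(InsertionEncodablePerms.is_insertion_encodable_rightmost(basis))
print(InsertionEncodablePerms.is_insertion_encodable_maximum(basis))
print(InsertionEncodablePerms.is_insertion_encodable(basis))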
[ "def is_inserted(self):\n return self.code < -200", "def is_insertion(ival):\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins", "def is_insertion_encodable_rightmost(basis: Iterable[Perm]) -> bool:\n curr = 0\n for perm in basis:\n curr = curr | InsertionEncodablePerms._insertion_encodable_properties(perm)\n if curr == InsertionEncodablePerms._ALL_PROPERTIES:\n return True\n return False", "def is_insertion_encodable_maximum(basis: Iterable[Perm]) -> bool:\n curr = 0\n for perm in basis:\n curr = curr | InsertionEncodablePerms._insertion_encodable_properties(\n perm.rotate()\n )\n if curr == InsertionEncodablePerms._ALL_PROPERTIES:\n return True\n return False", "def check_db_entry(self):\n raise NotImplementedError", "def probe_checker(insert):\n\n # insert is None for terminal inserts.\n if insert is None:\n return False\n\n # extract the fixture id\n # (The name of the insert in the fixture file)\n fix_id = insert.fix_id\n\n # only strings are accepted.\n if not isinstance(fix_id, str):\n return False\n\n return fix_id == token", "def assertInsert(self, obj):\n self.assertEqual(self.obj, obj)", "def brc_checker(insert):\n\n # insert is None for terminal inserts.\n if insert is None:\n return False\n\n # return false if insert is not a pin / insert\n if insert.insert_type not in [\"Pin\", \"Offset\"]:\n return False\n\n # The brc name (as described in the fixture file (and wirelist)\n # is stored in fix_id.\n fix_id = insert.fix_id\n\n # a fix_id that is a string means there was a lookup error.\n if isinstance(fix_id, str):\n return False\n\n return fix_id.brc == token", "def _json_request(self):\n return 'json' in self._get_content_type()", "def _validate_insert(self, session, obj):\n self._validate_model(session, obj)", "def _is_after_insertion_at_reg_begin(self, delta):\n sel = self.view.sel()\n if len(sel) != 1:\n return False\n\n [sel] = sel\n if not sel.empty():\n return False\n\n pt = sel.a\n reg = self.get_edit_region()\n \n if pt == reg.a:\n return True\n\n if delta == 2 and pt == reg.a - 1 and\\\n self.view.substr(pt) in CLOSING_AUTOINSERT_CHARS:\n return True\n\n return False", "def IsSerializable(self) -> bool:", "def test_desert_is_migratable(self, desert):\n assert desert.is_migratable is True", "def _inserted(self, container):\n pass", "def insert_in_database(self, input_dict: Dict,\n date_time: Optional[datetime] = None) -> bool: \n for k in input_dict.keys():\n \n if k not in INPUT_TYPES:\n return False\n \n if ((INPUT_TYPES[k] is not None) and (not isinstance(input_dict[k],INPUT_TYPES[k]))):\n return False\n \n for k in input_dict['nutri_dict'].keys():\n \n if k not in NUTRI_KEYS:\n return False\n \n if not isinstance(input_dict['nutri_dict'][k],NUTRI_TYPES[k]):\n return False\n \n data_dict = copy.deepcopy(input_dict)\n del data_dict['nutri_dict']\n data_dict['UserID'] = self._user_id\n if date_time is None:\n data_dict['Datetime'] = datetime.utcnow()\n else:\n data_dict['Datetime'] = date_time\n \n for k in input_dict['nutri_dict'].keys():\n data_dict[k] = input_dict['nutri_dict'][k]\n\n try:\n self._database_manager.insert_row_1(self._table_name,data_dict)\n return True\n except:\n return False", "def _is_primitive(self, valid_dict):\n if valid_dict['type'] in self.primitive_keys:\n return True\n return False", "def test_insert():\n\n new = Key('testing_key')\n\n returned = new.insert()\n\n assert returned.acknowledged\n assert returned.inserted_id > -1", "def supports_authorization_record_type(self, authorization_record_type):\n return # boolean", 
"def _utf8_encoded_json(request):\r\n content_type = request.META.get('CONTENT_TYPE', '')\r\n parts = content_type.split(';')\r\n if (len(parts) != 2 or\r\n parts[0].strip().lower() != 'application/json' or\r\n parts[1].strip().lower() != 'charset=utf-8'):\r\n return False\r\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Google Cloud Storage service object.
def GcsApi(self) -> 'googleapiclient.discovery.Resource':
    return common.CreateService(
        'storage', self.CLOUD_STORAGE_API_VERSION)
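The method above delegates to an internal common.CreateService helper that is not shown in the record. As a hedged point of comparison, the sketch below obtains an equivalent Storage v1 discovery resource directly with googleapiclient; Application Default Credentials are assumed and the project id is a placeholder.

# Hedged stand-in for common.CreateService, using googleapiclient directly.
import googleapiclient.discovery

def build_gcs_service():
    # 'v1' mirrors CLOUD_STORAGE_API_VERSION in the record; ADC supplies credentials.
    return googleapiclient.discovery.build('storage', 'v1', cache_discovery=False)

gcs = build_gcs_service()
response = gcs.buckets().list(project='my-project-id').execute()  # placeholder project id
print([bucket['name'] for bucket in response.get('items', [])])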
[ "def _GetService() -> object_storage_service.ObjectStorageService: # pytype: disable=not-instantiable\n # TODO(pclay): consider using FLAGS.storage to allow cross cloud testing?\n cloud = FLAGS.cloud\n providers.LoadProvider(cloud)\n service = object_storage_service.GetObjectStorageClass(cloud)()\n # This method is idempotent with default args and safe to call in each phase.\n service.PrepareService(FLAGS.object_storage_region)\n return service", "def GcstApi(self) -> 'googleapiclient.discovery.Resource':\n\n if self.gcst_api_client:\n return self.gcst_api_client\n self.gcst_api_client = common.CreateService(\n 'storagetransfer', self.CLOUD_STORAGE_TRANSFER_API_VERSION)\n return self.gcst_api_client", "def star_storage_service(self) -> StorageService:\n return self.storage_services[self.config.storage.star]", "def _get_storage_client(cls, settings):\n client_settings = cls._subclass_specific_config(settings, {})\n\n client_args = {}\n if client_settings[\"project_id\"]:\n LOG.info(\"Using GCP project id `%s`\", client_settings[\"project_id\"])\n client_args[\"project\"] = client_settings[\"project_id\"]\n\n if client_settings[\"client_options\"]:\n client_args[\"client_options\"] = client_settings[\"client_options\"]\n\n service_account_json_filename = client_settings.get(\n \"service_account_json_filename\"\n )\n\n if not service_account_json_filename:\n LOG.info(\"Creating GCS client without service account JSON file\")\n\n client = storage.Client(**client_args)\n else:\n if not os.path.isfile(service_account_json_filename) and not cls.test:\n raise Exception(\n \"Service account JSON file not found at provided \"\n \"path {}\".format(service_account_json_filename)\n )\n\n LOG.info(\n \"Creating GCS client from service account JSON file %s\",\n service_account_json_filename,\n )\n\n client = storage.Client.from_service_account_json(\n service_account_json_filename, **client_args\n )\n\n return client", "def _get_blob_service(self, storage_client, group_name, storage_name):\n cached_key = (group_name, storage_name)\n blob_service = self._cached_blob_services.get(cached_key)\n\n if blob_service is None:\n with self._blob_services_lock:\n blob_service = self._cached_blob_services.get(cached_key)\n if blob_service is None:\n account_key = self._get_storage_account_key(\n storage_client=storage_client,\n group_name=group_name,\n storage_name=storage_name)\n\n blob_service = BlockBlobService(account_name=storage_name, account_key=account_key)\n self._cached_blob_services[cached_key] = blob_service\n\n return blob_service", "def _get_file_service(self, storage_client, group_name, storage_name):\n cached_key = (group_name, storage_name)\n file_service = self._cached_file_services.get(cached_key)\n\n if file_service is None:\n with self._file_services_lock:\n file_service = self._cached_file_services.get(cached_key)\n if file_service is None:\n account_key = self._get_storage_account_key(\n storage_client=storage_client,\n group_name=group_name,\n storage_name=storage_name)\n\n file_service = FileService(account_name=storage_name, account_key=account_key)\n self._cached_file_services[cached_key] = file_service\n\n return file_service", "def vault_storage_service(self) -> StorageService:\n return self.storage_services[self.config.storage.vault]", "def init_gcs(credentials_path, gcp_project):\n\n try:\n credentials = service_account.Credentials.from_service_account_file(\n credentials_path)\n except IOError:\n msg = 'no or invalid credentials found at {}, '.format(credentials_path)\n msg += 'have 
you run setup_environment.sh?'\n raise ValueError(msg)\n\n service = storage.Client(project=gcp_project, credentials=credentials)\n\n return service", "def get_storage_api(http):\n return build('storage', 'v1', http)", "def get_storage(storage_name):\n storage_config = settings.STORAGES[storage_name]\n storage_class_name = storage_config['class']\n storage_class = globals_dict[storage_class_name]\n storage_class_kwargs = storage_config.get('kwargs', {})\n storage_instance = storage_class(**storage_class_kwargs)\n return storage_instance", "def getStorage( self, parameterDict ):\n # The storage name must be supplied.\n if parameterDict.has_key( 'StorageName' ):\n storageName = parameterDict['StorageName']\n else:\n errStr = \"StorageFactory.getStorage: StorageName must be supplied\"\n gLogger.error( errStr )\n return S_ERROR( errStr )\n\n # ProtocolName must be supplied otherwise nothing with work.\n if parameterDict.has_key( 'ProtocolName' ):\n protocolName = parameterDict['ProtocolName']\n else:\n errStr = \"StorageFactory.getStorage: ProtocolName must be supplied\"\n gLogger.error( errStr )\n return S_ERROR( errStr )\n\n # The other options need not always be specified\n if parameterDict.has_key( 'Protocol' ):\n protocol = parameterDict['Protocol']\n else:\n protocol = ''\n\n if parameterDict.has_key( 'Port' ):\n port = parameterDict['Port']\n else:\n port = ''\n\n if parameterDict.has_key( 'Host' ):\n host = parameterDict['Host']\n else:\n host = ''\n\n if parameterDict.has_key( 'Path' ):\n path = parameterDict['Path']\n else:\n path = ''\n\n if parameterDict.has_key( 'SpaceToken' ):\n spaceToken = parameterDict['SpaceToken']\n else:\n spaceToken = ''\n\n if parameterDict.has_key( 'WSUrl' ):\n wsPath = parameterDict['WSUrl']\n else:\n wsPath = ''\n\n return self.__generateStorageObject( storageName, protocolName, protocol, path, host, port, spaceToken, wsPath, parameterDict )", "def k8s_storage_client(k8s_conf):\n logger.debug('Retrieving K8s networking API client')\n return client.StorageV1Api(get_client_conn(k8s_conf))", "def get_object_storage_client(self):\n _logger.debug('%s', where_am_i())\n if self._object_storage_client is None:\n if self.signer is not None:\n self._object_storage_client = \\\n oci_sdk.object_storage.object_storage_client.ObjectStorageClient(config=self.oci_config,\n signer=self.signer)\n else:\n self._object_storage_client = \\\n oci_sdk.object_storage.object_storage_client.ObjectStorageClient(config=self.oci_config)\n return self._object_storage_client", "def service_resource(self):\n\n # Obtain a method reference if we don't already have one. Otherwise, \n # reuse the one we've already obtained and cached in a static class \n # variable. 
This avoids significant real time delay.\n # TEMP COMMENTED OUT\n #if not Disk.method_ref:\n #Disk.method_ref = self.gce_project.service.disks()\n #return Disk.method_ref\n return self.gce_project.service.disks()", "def _get_service(project):\n scope = ['https://www.googleapis.com/auth/analytics.readonly',\n 'https://www.googleapis.com/auth/analytics',\n '']\n key_file_location = get_service_key_path(project)\n\n # Authenticate and construct service.\n return _build_service('analytics', 'v3', scope, key_file_location)", "def client():\n if es:\n return es\n else:\n raise StorageClientException(STORAGE_NAME,\n \"client used before initialization.\")", "def get(cls, client, name):\n response = client.api.storage_pools[name].get()\n\n storage_pool = cls(client, **response.json()['metadata'])\n return storage_pool", "def _get_storage(agent: AbstractAgent) -> Optional[Storage]:\n if agent.storage_uri:\n # threaded has to be always True, cause synchronous operations are supported\n return Storage(agent.storage_uri, threaded=True)\n return None # pragma: nocover", "def GetApi(provider):\n if getattr(_cloud_api_thread_local_storage, provider, None) is None:\n if provider == GCS_PREFIX:\n # TODO(b/159164504): Update with implemented GCS API.\n _cloud_api_thread_local_storage.gs = CloudApi()\n elif provider == AWS_S3_PREFIX:\n # TODO(b/159164385): Update with implemented S3 API.\n _cloud_api_thread_local_storage.s3 = CloudApi()\n else:\n raise ValueError('Provider API value must be \"gs\" or \"s3\".')\n return getattr(_cloud_api_thread_local_storage, provider)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get API operation object metadata for a Google Cloud Storage object.
def GetObjectMetadata(self,
                      gcs_path: str,
                      user_project: Optional[str] = None) -> Dict[str, Any]:
  if not gcs_path.startswith('gs://'):
    gcs_path = 'gs://' + gcs_path
  bucket, object_path = SplitStoragePath(gcs_path)
  gcs_objects = self.GcsApi().objects()  # pylint: disable=no-member
  request = gcs_objects.get(
      bucket=bucket, object=object_path, userProject=user_project)
  response = request.execute()  # type: Dict[str, Any]
  return response
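A minimal usage sketch for the method above: gcs is assumed to be an already-constructed instance of the storage client class that defines GetObjectMetadata, and the object path is a placeholder. The fields read from the response are standard GCS object-resource fields.

# gcs: assumed instance of the client class defining GetObjectMetadata().
metadata = gcs.GetObjectMetadata('gs://my-bucket/disk.image')
# 'size' and 'md5Hash' are standard fields of the returned object resource.
print(metadata.get('size'), metadata.get('md5Hash'))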
[ "def GetObjectMetadata(self,\n bucket_name,\n object_name,\n generation=None,\n provider=None,\n fields_scope=None):\n raise NotImplementedError('GetObjectMetadata must be overloaded')", "def blob_metadata(bucket_name, blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.get_blob(blob_name)\n\n print('Blob: {}'.format(blob.name))\n print('Bucket: {}'.format(blob.bucket.name))\n print('Storage class: {}'.format(blob.storage_class))\n print('ID: {}'.format(blob.id))\n print('Size: {} bytes'.format(blob.size))\n print('Updated: {}'.format(blob.updated))\n print('Generation: {}'.format(blob.generation))\n print('Metageneration: {}'.format(blob.metageneration))\n print('Etag: {}'.format(blob.etag))\n print('Owner: {}'.format(blob.owner))\n print('Component count: {}'.format(blob.component_count))\n print('Crc32c: {}'.format(blob.crc32c))\n print('md5_hash: {}'.format(blob.md5_hash))\n print('Cache-control: {}'.format(blob.cache_control))\n print('Content-type: {}'.format(blob.content_type))\n print('Content-disposition: {}'.format(blob.content_disposition))\n print('Content-encoding: {}'.format(blob.content_encoding))\n print('Content-language: {}'.format(blob.content_language))\n print('Metadata: {}'.format(blob.metadata))\n print(\"Temporary hold: \",\n 'enabled' if blob.temporary_hold else 'disabled')\n print(\"Event based hold: \",\n 'enabled' if blob.event_based_hold else 'disabled')\n if blob.retention_expiration_time:\n print(\"retentionExpirationTime: {}\"\n .format(blob.retention_expiration_time))", "def _object_resource_from_metadata(metadata):\n if metadata.generation is not None:\n # Generation may be 0 integer, which is valid although falsy.\n generation = str(metadata.generation)\n else:\n generation = None\n url = storage_url.CloudUrl(\n scheme=storage_url.ProviderPrefix.GCS,\n bucket_name=metadata.bucket,\n object_name=metadata.name,\n generation=generation)\n return gcs_resource_reference.GcsObjectResource(\n url,\n creation_time=metadata.timeCreated,\n etag=metadata.etag,\n md5_hash=metadata.md5Hash,\n metadata=metadata,\n metageneration=metadata.metageneration,\n size=metadata.size)", "def s3_metadata(self) -> 'outputs.S3CompatibleMetadataResponse':\n return pulumi.get(self, \"s3_metadata\")", "def get_object(self, container, obj):\r\n cname = self._resolve_name(container)\r\n oname = self._resolve_name(obj)\r\n obj_info = self.connection.head_object(cname, oname)\r\n lm_str = obj_info[\"last-modified\"]\r\n dtstr = _convert_head_object_last_modified_to_local(lm_str)\r\n obj = StorageObject(self, self.get_container(container),\r\n name=oname, content_type=obj_info[\"content-type\"],\r\n total_bytes=int(obj_info[\"content-length\"]),\r\n last_modified=dtstr, etag=obj_info[\"etag\"])\r\n return obj", "def get_blob_meta(objecturl, logprefix=\"\", **kwargs):\n bucketname, keyname = s3_split_url(objecturl)\n logprefix = logprefix + \" \" if logprefix else logprefix\n logger.debug(\"%sfetching meta for URL: %s\", logprefix, objecturl)\n s3 = boto3.client('s3')\n try:\n # if 'RequestPayer' not in kwargs:\n # kwargs['RequestPayer'] = 'requester'\n\n head_res = s3.head_object(Bucket=bucketname, Key=keyname, **kwargs)\n except ClientError as clierr:\n if clierr.response['Error']['Code'] == '404':\n raise NoSuchFile(objecturl)\n logger.error(\"%scould not fetch URL (%s): %s\", logprefix, repr(clierr.response['Error']['Code']), objecturl,\n exc_info=clierr)\n raise\n return head_res", "def get_object_metadatainfo(self, 
object_id):\n \n self.prepareThread()\n logger.info(\"Getting MetaData for object {%s}\", object_id)\n return getRuntime().get_metadata(object_id)", "def _backend_metadata(self) -> dict:\n return {\"storage_type\": \"public_s3_bucket\",\n \"name\": \"Public S3 Bucket\",\n \"description\": \"A type to use data stored in a public S3 bucket\",\n \"tags\": [\"unmanaged\", \"s3\", \"aws\"],\n \"icon\": \"s3.png\",\n \"url\": \"https://docs.gigantum.com\",\n \"readme\": \"\"\"This dataset type simply loads data from a public S3 bucket. It supports automatic \nsynchronization with S3 so you don't need to manually enter any information other than the bucket name. \n\nDue to the possibility of storing lots of data, when updating you can optionally keep all data locally or not. Because\n all files must be hashed when adding to the dataset, they all need to be downloaded by the creator. Once added\n to the dataset, partial downloads of the data is supported. To learn more, check out the docs here:\n [https://docs.gigantum.com](https://docs.gigantum.com)\n\"\"\"}", "def request_compute_metadata(path):\n gce_metadata_endpoint = 'http://' + os.environ.get(\n _GCE_METADATA_URL_ENV_VARIABLE, 'metadata.google.internal')\n req = request.Request(\n '%s/computeMetadata/v1/%s' % (gce_metadata_endpoint, path),\n headers={'Metadata-Flavor': 'Google'})\n info = request.urlopen(req).read()\n if isinstance(info, bytes):\n return info.decode('utf-8')\n else:\n return info", "def testGetObjectMetadata(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object4 = {\n \"id\": \"test_object_id4\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome4\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta4 = impl.save_object(test_object4)\n\n obj = impl.get_objectmeta({\"workspace\":ws_name,\"id\": \"test_object_id4\", \"type\": \"Genome\",\"auth\": self.__class__.token})\n\n self.assertIn({\"origin\":\"shreyas\"}, obj)", "def get_metadata(self, queue):\r\n uri = \"/%s/%s/metadata\" % (self.uri_base, utils.get_id(queue))\r\n resp, resp_body = self.api.method_get(uri)\r\n return resp_body", "def get_obj_stats(self, bucket_name_, prefix_, obj_name_):\n\n stats = {}\n\n try:\n obj_header = self.client.head_object(\n Bucket=bucket_name_, Key=prefix_ + obj_name_)\n\n stats[\"size_bytes\"] = obj_header[\"ContentLength\"]\n stats[\"size_mb\"] = obj_header[\"ContentLength\"] / 1048576\n stats[\"last_modified\"] = obj_header[\"LastModified\"]\n\n except ClientError as e:\n logging.info(\n f\"There was an error retrieving stats for {obj_name_}. {e} \")\n\n return stats", "def _get_metadata(handle, bucket, key):\n try:\n file_metadata = json.loads(handle.get(bucket=bucket, key=key).decode('utf-8'))\n return file_metadata\n except BlobNotFoundError:\n sys.stderr.write(f'Unable to locate: {bucket}/{key}')", "def get_meta(self):\n if not self.has_meta:\n try:\n self.head_object = self.get_s3_connection().head_object(Bucket=self.get_bucket(), Key=self.get_key())\n self.has_meta = True\n except Exception as e:\n if self.region is None:\n logging.debug('Could not get meta-data of S3Object. 
Region not set so assuming incorrect region.')\n logging.debug('Try to determine bucket region, and try to reconfigure the connection.')\n self.region = 'unknown'\n self.connect_to_bucket_region()\n return self.get_meta()\n else:\n region_set_no_metadata = 'Could not get meta-data of S3Object and region was {r}.'\n raise Exception(region_set_no_metadata.format(r=str(self.region))) from e\n return self.head_object", "def get_metadata(self, queue):\r\n return self._manager.get_metadata(queue)", "def get_gcsfuse_metadata():\n shots_bucket = get_metadata('attributes/gcsfuse_shots_bucket', 'shots')\n local_shots = get_metadata('attributes/gcsfuse_local_shots', '/shots')\n return dict(\n shots_bucket=shots_bucket,\n local_shots=local_shots,\n )", "def _getMetadataForObject(self, obj):\r\n # get the hash of the object and use it as a dict key for the metadata dict\r\n fshash = obj.hash()\r\n\r\n # no entry for this hash? make one first\r\n if fshash not in self._md:\r\n self._md[fshash] = {}\r\n\r\n return self._md[fshash]", "def metadata(self) -> dict[str, Any]:", "def get_item_metadata(self, handle):\n raise(NotImplementedError())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get ACLs for a Google Cloud Storage bucket. This includes both ACL entries and IAM policies.
def GetBucketACLs(self,
                  bucket: str,
                  user_project: Optional[str] = None) -> Dict[str, List[str]]:
  ret = collections.defaultdict(list)
  if bucket.startswith('gs://'):
    # Can change to removeprefix() in 3.9
    bucket = bucket[5:]
  gcs_bac = self.GcsApi().bucketAccessControls()  # pylint: disable=no-member
  request = gcs_bac.list(bucket=bucket, userProject=user_project)
  # https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls#resource
  ac_response = request.execute()
  for item in ac_response.get('items', []):
    if item.get('kind') == 'storage#bucketAccessControl':  # Sanity check
      ret[item['role']].append(item['entity'])
  gcs_buckets = self.GcsApi().buckets()  # pylint: disable=no-member
  request = gcs_buckets.getIamPolicy(bucket=bucket)
  # https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
  iam_response = request.execute()
  for item in iam_response.get('bindings', []):
    for member in item.get('members', []):
      ret[item['role']].append(member)
  return ret
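A short usage sketch for the ACL helper above; gcs is again an assumed client instance and the bucket name is illustrative. The returned mapping goes from role to the ACL entities and IAM members holding that role.

acls = gcs.GetBucketACLs('gs://my-bucket')
for role, members in sorted(acls.items()):
    # e.g. a legacy ACL role or an IAM role such as roles/storage.objectViewer
    print(role, '->', ', '.join(members))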
[ "def get_bucket_acl(client, bucket_name):\n req = client.bucketAccessControls().list(\n bucket=bucket_name,\n )\n resp = req.execute()\n return resp", "def get_bucket_defacl(client, bucket_name):\n req = client.defaultObjectAccessControls().list(\n bucket=bucket_name,\n )\n resp = req.execute()\n return resp", "def ListBucketObjects(self, bucket: str) -> List[Dict[str, Any]]:\n if bucket.startswith('gs://'):\n # Can change to removeprefix() in 3.9\n bucket = bucket[5:]\n gcs_objects = self.GcsApi().objects() # pylint: disable=no-member\n request = gcs_objects.list(bucket=bucket)\n objects = request.execute() # type: Dict[str, Any]\n return objects.get('items', [])", "def test_get_acl(make_stubber, make_unique_name, make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n bucket = make_bucket(stubber, bucket_wrapper, bucket_name, stubber.region_name)\n\n stubber.stub_get_bucket_acl(bucket_name, ['owner'])\n\n acl = bucket_wrapper.get_acl(bucket.name)\n assert len(acl.grants) == 1\n assert acl.owner['ID'] == acl.grants[0]['Grantee']['ID']\n assert acl.grants[0]['Permission'] == 'FULL_CONTROL'", "def gcp_ls(path, bucket_name):\n\n client = storage.Client()\n return client.list_blobs(bucket_name, prefix=path)", "def ListBuckets(self) -> List[Dict[str, Any]]:\n gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member\n request = gcs_buckets.list(project=self.project_id)\n objects = request.execute() # type: Dict[str, Any]\n return objects.get('items', [])", "def get_bucket_access_control_policy(bucket, key_id = None, secret = None):\n return client.GetBucketAccessControlPolicy(Bucket = bucket,\n\t\t\t\t\t **auth_keywords('GetBucketAccessControlPolicy',\n key_id, secret))", "def acl(self):\n # type: () -> list[AclEntry]\n return self._acl", "def check_perm_read_acl(self, bucket):\r\n\r\n if bucket.exists != BucketExists.YES:\r\n raise BucketMightNotExistException()\r\n\r\n try:\r\n bucket.foundACL = self.s3_client.get_bucket_acl(Bucket=bucket.name)\r\n self.parse_found_acl(bucket) # If we can read ACLs, we know the rest of the permissions\r\n except ClientError as e:\r\n if e.response['Error']['Code'] == \"AccessDenied\" or e.response['Error']['Code'] == \"AllAccessDisabled\":\r\n if self.aws_creds_configured:\r\n bucket.AuthUsersReadACP = Permission.DENIED\r\n else:\r\n bucket.AllUsersReadACP = Permission.DENIED\r\n else:\r\n raise e", "def _get_acls(self, datapath):\n auth_acl = datapath.acls.get(self._auth_acl_name)\n noauth_acl = datapath.acls.get(self._noauth_acl_name)\n return (auth_acl, noauth_acl)", "def parse_found_acl(self, bucket):\r\n if bucket.foundACL is None:\r\n return\r\n\r\n if 'Grants' in bucket.foundACL:\r\n for grant in bucket.foundACL['Grants']:\r\n if grant['Grantee']['Type'] == 'Group':\r\n if 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers':\r\n # Permissions have been given to the AuthUsers group\r\n if grant['Permission'] == 'FULL_CONTROL':\r\n bucket.AuthUsersRead = Permission.ALLOWED\r\n bucket.AuthUsersWrite = Permission.ALLOWED\r\n bucket.AuthUsersReadACP = Permission.ALLOWED\r\n bucket.AuthUsersWriteACP = Permission.ALLOWED\r\n bucket.AuthUsersFullControl = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ':\r\n bucket.AuthUsersRead = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ_ACP':\r\n bucket.AuthUsersReadACP = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE':\r\n bucket.AuthUsersWrite = 
Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE_ACP':\r\n bucket.AuthUsersWriteACP = Permission.ALLOWED\r\n\r\n elif 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers':\r\n # Permissions have been given to the AllUsers group\r\n if grant['Permission'] == 'FULL_CONTROL':\r\n bucket.AllUsersRead = Permission.ALLOWED\r\n bucket.AllUsersWrite = Permission.ALLOWED\r\n bucket.AllUsersReadACP = Permission.ALLOWED\r\n bucket.AllUsersWriteACP = Permission.ALLOWED\r\n bucket.AllUsersFullControl = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ':\r\n bucket.AllUsersRead = Permission.ALLOWED\r\n elif grant['Permission'] == 'READ_ACP':\r\n bucket.AllUsersReadACP = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE':\r\n bucket.AllUsersWrite = Permission.ALLOWED\r\n elif grant['Permission'] == 'WRITE_ACP':\r\n bucket.AllUsersWriteACP = Permission.ALLOWED\r\n\r\n # All permissions not explicitly granted in the ACL are denied\r\n # TODO: Simplify this\r\n if bucket.AuthUsersRead == Permission.UNKNOWN:\r\n bucket.AuthUsersRead = Permission.DENIED\r\n\r\n if bucket.AuthUsersWrite == Permission.UNKNOWN:\r\n bucket.AuthUsersWrite = Permission.DENIED\r\n\r\n if bucket.AuthUsersReadACP == Permission.UNKNOWN:\r\n bucket.AuthUsersReadACP = Permission.DENIED\r\n\r\n if bucket.AuthUsersWriteACP == Permission.UNKNOWN:\r\n bucket.AuthUsersWriteACP = Permission.DENIED\r\n\r\n if bucket.AuthUsersFullControl == Permission.UNKNOWN:\r\n bucket.AuthUsersFullControl = Permission.DENIED\r\n\r\n if bucket.AllUsersRead == Permission.UNKNOWN:\r\n bucket.AllUsersRead = Permission.DENIED\r\n\r\n if bucket.AllUsersWrite == Permission.UNKNOWN:\r\n bucket.AllUsersWrite = Permission.DENIED\r\n\r\n if bucket.AllUsersReadACP == Permission.UNKNOWN:\r\n bucket.AllUsersReadACP = Permission.DENIED\r\n\r\n if bucket.AllUsersWriteACP == Permission.UNKNOWN:\r\n bucket.AllUsersWriteACP = Permission.DENIED\r\n\r\n if bucket.AllUsersFullControl == Permission.UNKNOWN:\r\n bucket.AllUsersFullControl = Permission.DENIED", "def get_acls(self):\n return self.access_list_manager.get_objects()", "def get(self, acl):\n return self._instance._client.acls.get(self._instance.name, acl)", "def bucket_generator():\n bucket_collection = get_s3_resource().buckets.all()\n for bucket in bucket_collection:\n yield bucket", "def bucket_listing(bucket):\n response = s3.list_objects(Bucket=bucket)\n\n file_listing = []\n for file_data in response[\"Contents\"]:\n data = {\"filename\": file_data[\"Key\"], \"size\": file_data[\"Size\"]}\n file_listing.append(data)\n\n print(\"File listing: {}\".format(file_listing))\n return file_listing", "def get_acls(scope: str, profile: str) -> Dict[str, str]:\n\n # Get the acls for the scope\n acl_query = 'databricks secrets list-acls'\n acl_query += f' --profile {profile}'\n acl_query += f' --scope {scope}'\n\n # Run and enforce success\n sp = subprocess.run(acl_query, capture_output=True)\n sp.check_returncode()\n\n # Extract the existing scopes\n acl_lines = [l.strip('\\r') for l in sp.stdout.decode().split('\\n')[1:]]\n acl_lines = [l for l in acl_lines if l.replace('-', '').strip()]\n acl_lines = [[elem for elem in l.split(' ') if elem] for l in acl_lines]\n\n # Turn acls int a dictionary\n existing_acls = {acl[0]: acl[1] for acl in acl_lines}\n\n return existing_acls", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n # for blob in blobs:\n # 
print(blob.name)\n return blobs", "def get_all_buckets(self):\n method = \"GET\"\n\n http_connection = HTTPConnection(\n compute_default_hostname(),\n self._identity.user_name,\n self._identity.auth_key,\n self._identity.auth_key_id\n )\n uri = compute_uri(\n \"/\".join([\"customers\", self._identity.user_name, \"collections\"]), \n )\n\n self._log.info(\"requesting {0}\".format(uri))\n try:\n response = http_connection.request(method, uri, body=None)\n except LumberyardHTTPError:\n instance = sys.exc_info()[1]\n self._log.error(str(instance))\n http_connection.close()\n raise\n \n self._log.info(\"reading response\")\n data = response.read()\n http_connection.close()\n collection_list = json.loads(data.decode(\"utf-8\"))\n\n bucket_list = list()\n for collection_dict in collection_list:\n bucket = Bucket(\n self._identity, \n collection_dict[\"name\"], \n versioning=collection_dict[\"versioning\"]\n )\n bucket_list.append(bucket)\n return bucket_list", "def getacl(self, mailbox):\n typ, dat = self._simple_command('GETACL', mailbox)\n return self._untagged_response(typ, dat, 'ACL')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List buckets in a Google Cloud project.
def ListBuckets(self) -> List[Dict[str, Any]]:
  gcs_buckets = self.GcsApi().buckets()  # pylint: disable=no-member
  request = gcs_buckets.list(project=self.project_id)
  objects = request.execute()  # type: Dict[str, Any]
  return objects.get('items', [])
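Usage sketch, with gcs an assumed client instance bound to a project; name and timeCreated are standard fields of the bucket resources returned by the JSON API.

for bucket in gcs.ListBuckets():
    print(bucket.get('name'), bucket.get('timeCreated'))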
[ "def list():\n return [b.name for b in s3.buckets.all()]", "def getBuckets(cos):\r\n\r\n res = []\r\n print(\"* Retrieving list of buckets\")\r\n\r\n try:\r\n buckets = cos.list_buckets()\r\n \r\n for b in buckets['Buckets']:\r\n res.append(b['Name'])\r\n\r\n return res\r\n \r\n except ClientError as err:\r\n print(\"[ERROR] Client error: {0}\\n\".format(err))\r\n \r\n except Exception as err:\r\n print(\"[ERROR] Unable to retrieve list buckets: {0}\".format(err))", "def ListBucketObjects(self, bucket: str) -> List[Dict[str, Any]]:\n if bucket.startswith('gs://'):\n # Can change to removeprefix() in 3.9\n bucket = bucket[5:]\n gcs_objects = self.GcsApi().objects() # pylint: disable=no-member\n request = gcs_objects.list(bucket=bucket)\n objects = request.execute() # type: Dict[str, Any]\n return objects.get('items', [])", "def get_all_buckets(client):\n res = client.list_buckets()\n buckets = [bucket['Name'] for bucket in res['Buckets']]\n return buckets", "def gcp_ls(path, bucket_name):\n\n client = storage.Client()\n return client.list_blobs(bucket_name, prefix=path)", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n # for blob in blobs:\n # print(blob.name)\n return blobs", "def get_all_buckets(self):\n method = \"GET\"\n\n http_connection = HTTPConnection(\n compute_default_hostname(),\n self._identity.user_name,\n self._identity.auth_key,\n self._identity.auth_key_id\n )\n uri = compute_uri(\n \"/\".join([\"customers\", self._identity.user_name, \"collections\"]), \n )\n\n self._log.info(\"requesting {0}\".format(uri))\n try:\n response = http_connection.request(method, uri, body=None)\n except LumberyardHTTPError:\n instance = sys.exc_info()[1]\n self._log.error(str(instance))\n http_connection.close()\n raise\n \n self._log.info(\"reading response\")\n data = response.read()\n http_connection.close()\n collection_list = json.loads(data.decode(\"utf-8\"))\n\n bucket_list = list()\n for collection_dict in collection_list:\n bucket = Bucket(\n self._identity, \n collection_dict[\"name\"], \n versioning=collection_dict[\"versioning\"]\n )\n bucket_list.append(bucket)\n return bucket_list", "def vbucket_list(self, bucket_name, vbucket_type=\"active\"):\n vb_list = list()\n output = self.get_stats_memc(bucket_name, \"vbucket\")\n for key in output.keys():\n curr_vb_type = output[key]\n if curr_vb_type == vbucket_type:\n vb_num = key\n vb_list.append(int(vb_num.split(\"_\")[1]))\n return vb_list", "def GetBucketACLs(self,\n bucket: str,\n user_project: Optional[str] = None) -> Dict[str, List[str]]:\n ret = collections.defaultdict(list)\n if bucket.startswith('gs://'):\n # Can change to removeprefix() in 3.9\n bucket = bucket[5:]\n gcs_bac = self.GcsApi().bucketAccessControls() # pylint: disable=no-member\n request = gcs_bac.list(bucket=bucket, userProject=user_project)\n # https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls#resource\n ac_response = request.execute()\n for item in ac_response.get('items', []):\n if item.get('kind') == 'storage#bucketAccessControl': # Sanity check\n ret[item['role']].append(item['entity'])\n gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member\n request = gcs_buckets.getIamPolicy(bucket=bucket)\n # https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy\n iam_response = request.execute()\n for item in iam_response.get('bindings', []):\n for member in item.get('members', []):\n 
ret[item['role']].append(member)\n return ret", "def list_buckets(self, time_delete: float) -> Iterator[str]:\n response = self.s3.list_buckets()\n\n for bucket in response[\"Buckets\"]:\n if bucket[\"CreationDate\"].timestamp() < time_delete:\n yield bucket[\"Name\"]", "def test_api_can_get_all_bucket(self):\n res = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(res.status_code, 201)\n res = self.client.get('/buckets/')\n self.assertEqual(res.status_code, 200)\n self.assertEqual(1, res.json['total'])", "def google_bucket_list(url: str, folder: str, filetype: str = None, full_path: bool = False) -> List[str]:\n try:\n resp = requests.get(url, timeout=TIMEOUT)\n resp.raise_for_status()\n except RequestException:\n logger.exception(\"Could not connect, URL may be out of service\")\n raise\n root = ElementTree.fromstring(resp.content)\n bucket_list = []\n for r in root:\n if list(r):\n filepath = r[0].text\n if filetype is not None:\n if filepath.startswith(folder) and filepath.endswith(filetype):\n istart, istop = filepath.find('/') + 1, filepath.find('.')\n bucket_list.append(filepath[istart:istop])\n else:\n if filepath.startswith(folder):\n istart, istop = filepath.find('/') + 1, filepath.find('.')\n bucket_list.append(filepath[istart:istop])\n return bucket_list", "def gcs_list_buckets_with_prefix(*, prefix: str = \"\") -> List[bucket.Bucket]:\n\n storage_client = storage.Client()\n buckets = list(storage_client.list_buckets())\n bucket_list = []\n for bucket in buckets:\n if bucket.name.startswith(prefix):\n bucket_list.append(bucket)\n\n return bucket_list", "def bucket_listing(bucket):\n response = s3.list_objects(Bucket=bucket)\n\n file_listing = []\n for file_data in response[\"Contents\"]:\n data = {\"filename\": file_data[\"Key\"], \"size\": file_data[\"Size\"]}\n file_listing.append(data)\n\n print(\"File listing: {}\".format(file_listing))\n return file_listing", "def quota_get_all_by_project_id(self, project_id):", "def blobs_list(format_):\n\n project_name = get_current_project(error=True)\n\n client = init_client()\n response = client.blobs_list(project_name=project_name)\n client.api_client.close()\n\n print_list(response, LIST_ITEMS, rename_cols={'ttl': 'time_to_live'}, sorting_col=2, fmt=format_)", "def get_buckets(self) -> List[B2Bucket]:\n\n buckets = []\n for account in self.accounts.values():\n buckets += account.buckets\n\n return buckets", "def parse_list_buckets(data):\n root = S3Element.fromstring('ListBucketsResult', data)\n\n return [\n Bucket(bucket.get_child_text('Name'),\n bucket.get_time_elem('CreationDate'))\n for buckets in root.findall('Buckets')\n for bucket in buckets.findall('Bucket')\n ]", "def list_google_accounts(self):\n return self._get(route='GoogleCloudAccount')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List objects (with metadata) in a Google Cloud Storage bucket.
def ListBucketObjects(self, bucket: str) -> List[Dict[str, Any]]:
  if bucket.startswith('gs://'):
    # Can change to removeprefix() in 3.9
    bucket = bucket[5:]
  gcs_objects = self.GcsApi().objects()  # pylint: disable=no-member
  request = gcs_objects.list(bucket=bucket)
  objects = request.execute()  # type: Dict[str, Any]
  return objects.get('items', [])
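Usage sketch (assumed gcs instance, placeholder bucket). Object sizes come back as strings from the JSON API, hence the int() conversion.

objects = gcs.ListBucketObjects('gs://my-bucket')
total_bytes = sum(int(obj.get('size', 0)) for obj in objects)
print(len(objects), 'objects totalling', total_bytes, 'bytes')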
[ "def bucket_listing(bucket):\n response = s3.list_objects(Bucket=bucket)\n\n file_listing = []\n for file_data in response[\"Contents\"]:\n data = {\"filename\": file_data[\"Key\"], \"size\": file_data[\"Size\"]}\n file_listing.append(data)\n\n print(\"File listing: {}\".format(file_listing))\n return file_listing", "def gcp_ls(path, bucket_name):\n\n client = storage.Client()\n return client.list_blobs(bucket_name, prefix=path)", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n # for blob in blobs:\n # print(blob.name)\n return blobs", "def ListBuckets(self) -> List[Dict[str, Any]]:\n gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member\n request = gcs_buckets.list(project=self.project_id)\n objects = request.execute() # type: Dict[str, Any]\n return objects.get('items', [])", "def list():\n return [b.name for b in s3.buckets.all()]", "def list_files(bucket: str):\n s3 = boto3.client('s3')\n contents = []\n for item in s3.list_objects(Bucket=bucket)['Contents']:\n contents.append(item)\n print(contents)\n return contents", "def blobs_list(format_):\n\n project_name = get_current_project(error=True)\n\n client = init_client()\n response = client.blobs_list(project_name=project_name)\n client.api_client.close()\n\n print_list(response, LIST_ITEMS, rename_cols={'ttl': 'time_to_live'}, sorting_col=2, fmt=format_)", "def print_bucket_files(s3):\n for bucket in s3.buckets.all():\n print(bucket.name)\n for ob in bucket.objects.all():\n print(\"\\t+\" + ob.__str__())", "def printS3items(): \r\n session = Session(aws_access_key_id=access_key_id,\r\n aws_secret_access_key=secret_access_key)\r\n your_bucket = session.resource('s3').Bucket(Bucket_name)\r\n for s3_file in your_bucket.objects.all():\r\n print(s3_file.key)", "def list_blobs(bucket_name, max_blobs):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n json_blobs = []\n for b in blobs:\n if b.name.endswith('.json'):\n json_blobs.append(b)\n\n recent_blobs = sorted(json_blobs, key=lambda blob: blob.updated, reverse=True)\n d = collections.OrderedDict()\n num_blobs = 0\n for b in recent_blobs:\n formatted_date = b.updated.strftime('%Y-%m-%d %H:%M:%S')\n d[formatted_date] = b\n num_blobs += 1\n if num_blobs == max_blobs:\n break\n return d", "def _FlatListBucket(self, bucket_url_string):\n return self.RunGsUtil(['ls', suri(bucket_url_string, '**')],\n return_stdout=True)", "def list_blobs_with_prefix(bucket_name, prefix, delimiter=None):\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix,\n delimiter=delimiter)\n\n print('Blobs:')\n for blob in blobs:\n print(blob.name)\n\n if delimiter:\n print('Prefixes:')\n for prefix in blobs.prefixes:\n print(prefix)", "def list():\n s3 = boto3.resource('s3')\n\n this_bucket = s3.Bucket(config.S3_ZOOM_BUCKET)\n\n keys = [x.key for x in this_bucket.objects.all() if x.key.endswith('.dzi')]\n return flask.jsonify({'keys': keys})", "def iterate_bucket(self, bucket, prefix, fn):\n paginator = boto3.client('s3').get_paginator('list_objects')\n for page in paginator.paginate(Bucket=bucket, Prefix=prefix):\n for obj in page['Contents']:\n key = obj['Key']\n fn(bucket, key)", "def google_bucket_list(url: str, folder: str, filetype: str = None, full_path: bool = False) -> List[str]:\n try:\n resp = 
requests.get(url, timeout=TIMEOUT)\n resp.raise_for_status()\n except RequestException:\n logger.exception(\"Could not connect, URL may be out of service\")\n raise\n root = ElementTree.fromstring(resp.content)\n bucket_list = []\n for r in root:\n if list(r):\n filepath = r[0].text\n if filetype is not None:\n if filepath.startswith(folder) and filepath.endswith(filetype):\n istart, istop = filepath.find('/') + 1, filepath.find('.')\n bucket_list.append(filepath[istart:istop])\n else:\n if filepath.startswith(folder):\n istart, istop = filepath.find('/') + 1, filepath.find('.')\n bucket_list.append(filepath[istart:istop])\n return bucket_list", "def list_buckets(self, time_delete: float) -> Iterator[str]:\n response = self.s3.list_buckets()\n\n for bucket in response[\"Buckets\"]:\n if bucket[\"CreationDate\"].timestamp() < time_delete:\n yield bucket[\"Name\"]", "def enumerate_bucket_objects(self, bucket):\r\n if bucket.exists == BucketExists.UNKNOWN:\r\n self.check_bucket_exists(bucket)\r\n if bucket.exists == BucketExists.NO:\r\n raise Exception(\"Bucket doesn't exist\")\r\n\r\n try:\r\n for page in self.s3_client.get_paginator(\"list_objects_v2\").paginate(Bucket=bucket.name):\r\n if 'Contents' not in page: # No items in this bucket\r\n bucket.objects_enumerated = True\r\n return\r\n for item in page['Contents']:\r\n obj = S3BucketObject(key=item['Key'], last_modified=item['LastModified'], size=item['Size'])\r\n bucket.add_object(obj)\r\n except ClientError as e:\r\n if e.response['Error']['Code'] == \"AccessDenied\" or e.response['Error']['Code'] == \"AllAccessDisabled\":\r\n raise AccessDeniedException(\"AccessDenied while enumerating bucket objects\")\r\n bucket.objects_enumerated = True", "def gcs_list_buckets_with_prefix(*, prefix: str = \"\") -> List[bucket.Bucket]:\n\n storage_client = storage.Client()\n buckets = list(storage_client.list_buckets())\n bucket_list = []\n for bucket in buckets:\n if bucket.name.startswith(prefix):\n bucket_list.append(bucket)\n\n return bucket_list", "def _get_s3_objects(self):\r\n try:\r\n s3_actions = S3Actions()\r\n object_details_list = s3_actions.list_objects_in_buckets(self.bucket_name)\r\n if not object_details_list:\r\n return 'Objects not found',404\r\n else:\r\n return object_details_list,200\r\n except Exception,e:\r\n logging.error(e.message)\r\n return 'Exception Occured',400" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an object in a Google Cloud Storage bucket.
def DeleteObject(self, gcs_path: str) -> None:
  if not gcs_path.startswith('gs://'):
    gcs_path = 'gs://' + gcs_path
  bucket, object_path = SplitStoragePath(gcs_path)
  gcs_objects = self.GcsApi().objects()  # pylint: disable=no-member
  request = gcs_objects.delete(bucket=bucket, object=object_path)
  request.execute()
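Usage sketch with an assumed gcs instance; the paths are placeholders, and the gs:// prefix may be omitted because the method prepends it when missing.

gcs.DeleteObject('my-bucket/stale/evidence.tar.gz')
# Or combine with ListBucketObjects to empty a bucket ('name' is a standard object field):
for obj in gcs.ListBucketObjects('gs://my-bucket'):
    gcs.DeleteObject('gs://my-bucket/' + obj['name'])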
[ "def bucket_delete():\r\n if not confirm(\"Are you sure you want to delete the bucket %r?\" % BUCKET_NAME):\r\n abort('Aborting at user request.')\r\n conn = connect_s3()\r\n conn.delete_bucket(BUCKET_NAME)\r\n print 'Bucket %r deleted.' % BUCKET_NAME", "def delete_object(bucket, key):\n return ObjectStore.delete_object(bucket=bucket, key=key)", "def delete(self):\n\n # TODO: Make sure the proper exceptions are raised.\n\n return self.connection.delete_bucket(self.name)", "def delete(self, bucket_name, blob_name):\n bucket = self.gcs_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.delete()\n print('Blob {} deleted.'.format(blob_name))", "def delete_via_gsutil(bucket_id, file_path):\n\n gs_url = \"gs://\" + bucket_id\n filename = file_path.split(\"/\")[-1]\n\n # Delete file from bucket via gsutil\n command = \"gsutil rm \" + gs_url + \"/\" + filename\n\n if not cmdline.func_CMD(command=command, stdout=False):\n print(f\"ERROR: failed to delete {filename}.\")\n # custom exit code to indicate exit-failed-to-gsutil-delete-file\n # TODO: replace with python error handling and logging (SCP-2790)\n exit(82)", "def delete_bucket(bucket, force=False):\n return _objstore_backend.delete_bucket(bucket=bucket, force=force)", "def delete_bucket(self):\n self.s3_client.delete_bucket(Bucket=BUCKET_NAME)\n print(\"Deleted Bucket: %s\" % BUCKET_NAME)", "def delete_blob(bucket_name, blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n blob.delete()", "def delete_object(bucket=None, key=None, uri=None, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n return s3_resource.Bucket(bucket).Object(key=key).delete()", "def rm_in_bucket(s3, bucket):\n bucket = s3.Bucket(bucket)\n bucket.objects.all().delete()", "def delete_file_from_bucket(self):\n self.s3_client.delete_object(Bucket=BUCKET_NAME, Key=FILENAME)\n print(\"File %s deleted from Bucket: %s\" % (FILENAME, BUCKET_NAME))", "def delete_bucket():\n\n s3 = session.resource('s3')\n\n try:\n bucket = s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\")\n bucket.objects.all().delete()\n bucket.delete()\n print('Deleted S3 bucket!')\n\n except Exception as e:\n print(f\"Error deleting S3 bucket. 
Exception: {e}.\")", "def gcs_delete_bucket_dir(*, bucket_name: str, prefix: str):\n\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=prefix)\n\n for blob in blobs:\n blob.delete()", "def delete_file(key):\n try:\n s3_bucket.Object(key).delete()\n except Exception as e:\n print(e)", "def delete(ctx):\n delete_script = \"\"\"\n rm -r $OUTPUT_PATH/fhir/IG || true > /dev/null\n gsutil -m rm -r gs://$GOOGLE_BUCKET/fhir/IG \n \"\"\"\n run_cmd(delete_script)", "def test_bucket_deletion(self):\n rv = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(rv.status_code, 201)\n res = self.client.delete('/buckets/1')\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 404\n res = self.client.get('/buckets/1')\n self.assertEqual(res.status_code, 404)", "def delete_gcs_storage_controller(self, request):\n try:\n logging.info(\"Delete GCS storage on Label Studio project\")\n delete_storage_url = (\n f\"{self.label_studio_config.get('gcs_storage')}/{request.storage_id}\"\n )\n status_code = APIInterface.delete(\n route=delete_storage_url, headers=self.header\n )\n if status_code == 204:\n return {\"status\": \"Storage Deleted Successfully\"}\n else:\n raise Exception({\"status\": \"Cannot Delete The Storage\"})\n except Exception as error:\n logging.error(f\"Error in delete_gcs_storage_controller: {error}\")\n raise error", "def delete(self):\n # Update bucket size.\n self.bucket.size -= self.size\n # Remove parts\n Part.query_by_multipart(self).delete()\n # Remove self\n self.query.filter_by(upload_id=self.upload_id).delete()", "def delete_s3_storage_controller(self, request):\n try:\n logging.info(f\"Delete S3 storage from Label Studio project\")\n delete_storage_url = (\n f\"{self.label_studio_config.get('s3_storage')}/{request.storage_id}\"\n )\n status_code = APIInterface.delete(\n route=delete_storage_url, headers=self.header\n )\n if status_code == 204:\n return {\"status\": \"Storage Deleted Successfully\"}\n else:\n raise Exception({\"status\": \"Cannot Delete The Storage\"})\n except Exception as error:\n logging.error(f\"Error in delete_s3_storage_controller: {error}\")\n raise error" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches information for a provided station name from SomaFM
def somafm_info(bot, trigger):
    user_input = trigger.group(2)
    if not user_input:
        return bot.reply("I need a station to lookup!")

    stations = _fetch(bot, API_CHANNELS_URL)

    user_input = user_input.strip().lower()
    station = []
    for channel in stations['channels']:
        if "*" in user_input:
            check = user_input.replace("*", "")
            if not check:
                return bot.reply("I can't lookup ALL channels at once nimrod.")
            if check in channel['title'].lower() \
                    or check in channel['id'].lower():
                station.append(channel)
        else:
            if user_input == channel['title'].lower() \
                    or user_input == channel['id'].lower():
                station.append(channel)

    if not station:
        return bot.reply("I couldn't find any stations by that name ({})".format(user_input))

    for s in station:
        channel_id = s["id"]
        tracks = _fetch(bot, API_SONGS_URL.format(channel_id))
        artist = tracks["songs"][0]["artist"]
        song = tracks["songs"][0]["title"]
        album = tracks["songs"][0]["album"]
        station_url = "https://somafm.com/{}/".format(channel_id)

        reply = (f"[SomaFM] {bold(s['title'])} ({s['listeners']} listeners)"
                 f" {s['description']} | {bold('DJ')}: {s['dj']} | {bold('Genre')}: {s['genre'].replace('|','/')}"
                 f" | {bold('Playing')}: {song} by {artist} [{album}] | Listen @ {station_url}")

        bot.say(reply, max_messages=2)
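For reference, a hedged sketch of the payload shape the handler above consumes. The endpoint URL here is an assumption standing in for API_CHANNELS_URL, and the keys printed are the ones the handler actually reads.

import requests

# Assumed channels endpoint; the plugin reads the real URL from API_CHANNELS_URL.
channels = requests.get('https://somafm.com/channels.json', timeout=10).json()
for channel in channels['channels']:
    print(channel['id'], channel['title'], channel['genre'], channel['listeners'])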
[ "def station_by_name(self, name):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if name == _[\"properties\"][\"name\"]]\n log.debug(\"searching for station {} found {}\".format(name, station))\n return station[0]\n except:\n log.debug(\"Exception: searching for station {} found None\".format(name))\n return None", "async def get_station(station: avwx.Station) -> dict:\n await app.station.add(station.icao, \"station\")\n return asdict(station)", "async def _get_station(self) -> Any:\n url = URL_STATION.format(self.station_id)\n return await self._async_get(url)", "def single_station_data(df, station):\n return df[df['STATION_NAME'] == station]", "def get_station_name(self):\n pass", "def find_station(self, name):\n for st in self._stations:\n if st.is_named(name):\n return st\n return None", "def find_station(value):\n for line in self.line_list:\n station = line.find_station(value)\n if station:\n return station\n return None", "def getClosestStation(query):\n return maps.queryVenue(\"train stations near \" + query).partition(\" \")[0] # get just name of station", "def _get_station_info(feeds):\n if 'station_information' not in feeds:\n raise ValueError('Missing station information feed. Got feeds ' +\n str(feeds))\n resp = requests.get(feeds['station_information'])\n resp.raise_for_status()\n return resp.json()['data']['stations']", "def populate_station_details(self):\n\n # --- First we need to check to see if the station player is playing\n # audio and if it is then we need to stop that audio\n if self.station_player_state == self.player_states[2]: self.station_player_controller()\n\n\n # --- Call out to the api and get the station attributes for the selected station\n result = self.get_station_attributes(self.station_ids[self.station_selector.currentText()],\n api_version=self.api_version_selector.currentText(),\n environment=self.environment_selector.currentText())\n\n if len(result) == 0: return # If we got nothing back from get_statoion_attributes() then quit\n\n stream_url = result[\"station_stream\"][FIRST][\"url\"]\n\n # --- set the station_id and callsign for the selected station\n self.selected_station_id = result[\"id\"]\n self.selected_station_callsign = result[\"callsign\"]\n\n # --- Populate the station details text boxes\n station_attributes = list(result.values())\n # TODO: find a cool \"index into a list with anther list\"\" solution\n counter = 0\n for item in station_attributes[0:len(self.station_details_values)]:\n font = self.station_details_values[counter].font()\n font.setPointSize(11)\n font.setBold(True)\n self.station_details_values[counter].setFont(font)\n self.station_details_values[counter].setText(str(item))\n self.station_details_values[counter].setCursorPosition(0)\n counter += 1\n\n\n # Download and show the image and set the image in the station tab\n station_logo_filename = podcast_player_utils.download_station_logo(result[\"square_logo_small\"])\n if os.path.isfile(station_logo_filename):\n pixmap = QPixmap(station_logo_filename)\n pixmap_resized = pixmap.scaled(150, 150, Qt.KeepAspectRatio)\n self.station_logo_image.setPixmap(pixmap_resized)\n self.staton_callsign_label.setText(\"%s %s\" %(result[\"name\"], result[\"callsign\"]))\n else:\n # TODO: Message box to tell user no image available for this station\n pass\n\n # Show and load the station player\n pixmap = QPixmap(os.path.join(RESOURCE_PATH, \"play.png\"))\n pixmap_resized = pixmap.scaled(150, 150, Qt.KeepAspectRatio)\n self.station_player_button.setPixmap(pixmap_resized)\n 
self.StationPlayer = vlc.MediaPlayer(stream_url)\n self.station_player_state = self.player_states[1] # Media ready\n self.station_player_label.setText(self.station_player_state)\n\n # --- Now we can populate the podcasts tab with all\n # of the podcasts for this station\n self.populate_podcasts()", "def _read_stations(self, stationinfo):\n stations = []\n with open(stationinfo, \"r\") as f:\n for line in f:\n name, stla, stlo, stel = line.split()[0:4]\n station = {\"name\": name,\n \"stla\": float(stla),\n \"stlo\": float(stlo),\n \"stel\": float(stel)\n }\n stations.append(station)\n logger.info(\"%d stations found.\", len(stations))\n return stations", "def station_by_id(self, id):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if _[\"properties\"][\"station_id\"] == id]\n log.debug(\"searching for station_id {} found {}\".format(id, station))\n return station[0]\n except:\n log.debug(\"searching for station_id {} found None\".format(id))\n return None", "def fetch_station_details(self): \n try:\n station_json = self.get_station_json()['root']['station']\n except:\n logging.error(\"incorrect input provided. \")\n return None\n \n \n dict_data = dict()\n\n #get station name\n dict_data['start_station_name'] = station_json[0]['name']\n \n #get date and time\n dict_data['current_date'] = self.get_station_json()['root']['date']\n dict_data['current_time'] = self.get_station_json()['root']['time']\n \n \n #get destination data in a dictionary of \"destination_name\":\"time\"\n destination_dict={}\n \n for i,j in enumerate(station_json[0]['etd']):\n try:\n destination_dict[j['destination']] = int(j[\"estimate\"][0]['minutes'])\n except:\n #in case Train is Leaving, its time of leaving will be 'Leaving' \n #and is default set to 0\n destination_dict[j['destination']] = 0\n \n dict_data['destintation_data'] = destination_dict\n return dict_data", "def info(self, name):\n\n station = self._playlist.get_station(name)\n if station:\n station.get_description()\n else:\n print(\"\\tNo references for '{}'\\n\".format(name), file=sys.stderr)", "def get_station(self, id):\n status, data = self.http_client.get_json(\n NAMED_STATION_URI % str(id),\n params={'appid': self.API_key},\n headers={'Content-Type': 'application/json'})\n return Station.from_dict(data)", "def station_by_location(self, location):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if location == _[\"geometry\"][\"coordinates\"]]\n log.debug(\"searching for location {} found {}\".format(location, station))\n return station[0]\n except:\n log.debug(\"searching for location {} found None\")\n return None", "def test_station_present(client):\n\n r = client.get('/api/stations/4618')\n assert b'VLADIMIR' in r.data", "def read_single_station(d, station_info, date):\n nztz = timezone('Pacific/Auckland')\n date_nz = nztz.localize(datetime.datetime(date.year, date.month,\n date.day, 6, 0, 0))\n timeshift = int(date_nz.utcoffset().seconds/3600.)\n datestr = '{:d}-{:02d}-{:02d}'.format(date.year, date.month, date.day)\n\n # Read the raw data\n if station_info['files']['raw'] is None:\n # There is no point continuing if we don't have any raw data\n msg = \"INFO 01: No raw data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n return\n\n e0 = d.read(station_info['files']['raw'],\n ftype='minidoas-raw', timeshift=timeshift)\n ib = InstrumentBuffer(name=station_info['stationID'],\n location=station_info['stationLoc'],\n no_bits=16,\n 
type='MiniDOAS')\n i = d.new(ib)\n try:\n rdt = d.elements['RawDataType'][0]\n except:\n rdt = d.new(e0['RawDataTypeBuffer'])\n rb = e0['RawDataBuffer']\n rb.type = rdt\n rb.instrument = i\n rb.target = station_info['target']\n lat = np.ones(rb.d_var.shape[0])*station_info['lat']\n lon = np.ones(rb.d_var.shape[0])*station_info['lon']\n elev = np.ones(rb.d_var.shape[0])*station_info['elev']\n bearing = np.ones(rb.d_var.shape[0])*np.rad2deg(station_info['bearing'])\n rb.position = np.array([lon, lat, elev]).T\n rb.bearing = bearing\n rb.inc_angle_error = np.ones(rb.d_var.shape[0])*0.013127537*180./np.pi\n rr = d.new(rb)\n\n # Read the concentration\n if station_info['files']['spectra'] is None:\n msg = \"INFO 02: No concentration (i.e. spectra) data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n return\n\n # First read in the smoothed version of the concentration\n # which the subsequent computation of flux values is\n # based on\n e1 = d.read(station_info['files']['spectra'],\n date=datestr, ftype='minidoas-spectra',\n timeshift=timeshift, model=True)\n cb = e1['ConcentrationBuffer']\n idxs = np.zeros(cb.value.shape)\n for i in range(cb.value.shape[0]):\n idx = np.argmin(np.abs(rr.datetime[:].astype('datetime64[ms]')\n - cb.datetime[i].astype('datetime64[ms]')))\n idxs[i] = idx\n cb.rawdata = [rr]\n cb.rawdata_indices = idxs\n cb.method = station_info['widpro_method']\n cb.user_notes = 'smoothed path concentration'\n cc = d.new(cb)\n\n # Now read in the original path concentration\n # to keep as a reference\n e2 = d.read(station_info['files']['spectra'],\n date=datestr, ftype='minidoas-spectra',\n timeshift=timeshift)\n cb2 = e2['ConcentrationBuffer']\n idxs = np.zeros(cb2.value.shape)\n for i in range(cb.value.shape[0]):\n idx = np.argmin(np.abs(rr.datetime[:].astype('datetime64[ms]')\n - cb2.datetime[i].astype('datetime64[ms]')))\n idxs[i] = idx\n cb2.rawdata = [rr]\n cb2.rawdata_indices = idxs\n cb2.method = station_info['widpro_method']\n cb2.user_notes = 'original path concentration'\n\n # Read in the flux estimates for assumed height\n if station_info['files']['flux_ah'] is None:\n msg = \"INFO 03: No assumed height flux data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n else:\n e3 = d.read(station_info['files']['flux_ah'],\n date=datestr, ftype='minidoas-scan',\n timeshift=timeshift)\n fb = e3['FluxBuffer']\n dt = fb.datetime[:].astype('datetime64[s]')\n indices = []\n for _dt in dt:\n idx = np.argmin(np.abs(cc.datetime[:].astype('datetime64[us]')\n - _dt))\n idx0 = idx\n while True:\n angle = rr.inc_angle[cc.rawdata_indices[idx]+1]\n if angle > 180.:\n break\n idx += 1\n idx1 = idx\n indices.append([idx0, idx1+1])\n fb.concentration = cc\n fb.concentration_indices = indices\n\n gfb1 = e3['GasFlowBuffer']\n\n m2 = None\n for _m in d.elements['Method']:\n if _m.name[:] == 'WS2PV':\n m2 = _m\n if m2 is None:\n mb2 = e3['MethodBuffer']\n m2 = d.new(mb2)\n\n gfb1.methods = [m2]\n gf1 = d.new(gfb1)\n fb.gasflow = gf1\n f = d.new(fb)\n # Now read in preferred flux values for assumed\n # height downloaded from FITS\n if station_info['files']['fits_flux_ah'] is None:\n msg = \"ERROR 01: No preferred flux for assumed height in FITS:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.error(msg)\n else:\n data_ah = 
np.loadtxt(station_info['files']['fits_flux_ah'],\n dtype=np.dtype([('date', 'S19'),\n ('val', np.float),\n ('err', np.float)]),\n skiprows=1, delimiter=',', ndmin=1)\n dates = data_ah['date'].astype('datetime64[s]')\n indices = []\n values = []\n val_err = []\n ndates = []\n for i, dt in enumerate(dates):\n min_tdiff = np.min(np.abs(f.datetime[:].astype('datetime64[s]')\n - dt))\n if min_tdiff.astype('int') > 1:\n msg = \"ERROR 02: No assumed height flux estimate can be\"\n msg += \" found for FITS value:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(dt))\n msg += \"-->FITS value: {}\\n\".format(data_ah['val'][i])\n logging.error(msg)\n else:\n idx = np.argmin(np.abs(f.datetime[:].\n astype('datetime64[s]') - dt))\n indices.append(idx)\n values.append(data_ah['val'][i])\n val_err.append(data_ah['err'][i])\n ndates.append(str(dt))\n if len(indices) > 0:\n pfb = PreferredFluxBuffer(fluxes=[f],\n flux_indices=[indices],\n value=values,\n value_error=val_err,\n datetime=ndates)\n d.new(pfb)\n\n # Read in the flux estimates for calculated height\n if station_info['files']['flux_ch'] is None:\n msg = \"INFO 04: No calculated height flux data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n else:\n e4 = d.read(station_info['files']['flux_ch'],\n date=datestr, ftype='minidoas-scan',\n station=station_info['wp_station_id'],\n timeshift=timeshift)\n fb1 = e4['FluxBuffer']\n dt = fb1.datetime[:].astype('datetime64[s]')\n indices = []\n for _dt in dt:\n idx = np.argmin(np.abs(cc.datetime[:].astype('datetime64[us]')\n - _dt))\n idx0 = idx\n while True:\n angle = rr.inc_angle[cc.rawdata_indices[idx]+1]\n if angle > 180.:\n break\n idx += 1\n idx1 = idx\n indices.append([idx0, idx1])\n fb1.concentration = cc\n fb1.concentration_indices = indices\n\n m3 = None\n for _m in d.elements['Method']:\n if _m.name[:] == 'WS2PVT':\n m3 = _m\n if m3 is None:\n mb3 = e4['MethodBuffer']\n new_description = mb3.description[0]\n new_description += '; plume geometry inferred from triangulation'\n mb3.description = new_description\n mb3.name = 'WS2PVT'\n m3 = d.new(mb3)\n\n gfb2 = e4['GasFlowBuffer']\n gfb2.methods = [m3]\n gf2 = d.new(gfb2)\n fb1.gasflow = gf2\n f1 = d.new(fb1)\n\n # Now read in preferred flux values for calculated\n # height downloaded from FITS\n if station_info['files']['fits_flux_ch'] is None:\n msg = \"ERROR 01: No preferred flux for\"\n msg = \" calculated height in FITS:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.error(msg)\n else:\n data_ch = np.loadtxt(station_info['files']['fits_flux_ch'],\n dtype=np.dtype([('date', 'S19'),\n ('val', np.float),\n ('err', np.float)]),\n skiprows=1, delimiter=',', ndmin=1)\n dates = data_ch['date'].astype('datetime64[s]')\n indices = []\n values = []\n val_err = []\n ndates = []\n for i, dt in enumerate(dates):\n min_tdiff = np.min(np.abs(f1.datetime[:].\n astype('datetime64[s]') - dt))\n if min_tdiff.astype('int') > 1:\n msg = \"ERROR 02: No calculated height flux estimate can be\"\n msg = \" found for FITS value:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(dt))\n msg += \"-->FITS value: {}\\n\".format(data_ah['val'][i])\n logging.error(msg)\n else:\n idx = np.argmin(np.abs(f1.datetime[:].\n astype('datetime64[s]') - dt))\n indices.append(idx)\n 
values.append(data_ch['val'][i])\n val_err.append(data_ch['err'][i])\n ndates.append(str(dt))\n if len(indices) > 0:\n pfb1 = PreferredFluxBuffer(fluxes=[f1],\n flux_indices=[indices],\n value=values,\n value_error=val_err,\n datetime=ndates)\n d.new(pfb1)", "def get_stations():\n stations_url = 'http://www.swt-wc.usace.army.mil/shefids.htm'\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'\n }\n resp = requests.get(stations_url, headers=headers)\n soup = BeautifulSoup(resp.content)\n pre = soup.find('pre')\n links = pre.find_all('a')\n stations = [\n _parse_station_link(link) for link in links\n ]\n\n return dict([\n (station['code'], station)\n for station in stations\n ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes all public methods from the api class and extends cls with them; the api class can be removed afterwards.
def extend(cls, api):
    if cls.EXTEND:
        for name, func in api.__dict__.iteritems():
            if name.startswith("_"):
                continue
            setattr(cls, name, MethodType(func, None, cls))

    return cls.EXTEND
[ "def __init__(self):\n ApiHandler.__init__(self)", "def __call__(self, cls):\n self.check(cls)\n if not hasattr(cls, '_fused_base'):\n cls._fused_base = []\n cls._fused_base.append(self._base)\n return base(implementer(interface(self._base))(cls))", "def dynamic_class_creation(name, base=object):\n # Protected name in the schema\n if name in [\n \"__schema^2__\",\n ]:\n return None\n schema_entry = aapi_schema[\"AAPI_schema\"][name]\n helper_string = _construct_docstring(schema_entry)\n atype, ptype, delimiter = _determine_type(schema_entry)\n status = schema_entry.get(\"status\", \"production\")\n\n new_class = type(\n name,\n (base,),\n dict(\n __doc__=helper_string,\n name=name,\n atype=atype,\n ptype=ptype,\n delimiter=delimiter,\n status=status,\n ),\n )\n return new_class", "def base(cls: T) -> T:\n base.classes.add(cls)\n return cls", "def _make_api_method(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n args[0]._extra_params = kwargs.pop(\"extra_params\", None)\n result = func(*args, **kwargs)\n try:\n del args[0]._extra_params\n except AttributeError: # pragma: no cover\n pass\n return result\n\n return wrapper", "def subclass(klass, *args, **kws):\n kws['baseclass'] = klass\n if isinstance(klass, Template):\n templateAPIClass = klass\n else:\n templateAPIClass = Template\n return templateAPIClass.compile(*args, **kws)", "def patcher(APIMixin, LogKlass, api):\n\n class Log(APIMixin, LogKlass):\n\n def __init__(self, lines, *args, **kwargs):\n\n APIMixin.__init__(self, lines, *args, **kwargs)\n LogKlass.__init__(self, lines, *args, **kwargs)\n\n for a in dir(LogKlass):\n if not a.startswith('_') and not callable(getattr(LogKlass, a)):\n api['attributes'].add(a)\n default_value = getattr(LogKlass, a)\n api['defaults'][a] = default_value\n kind = type(default_value)\n if kind in (unicode, basestring, bytes) or issubclass(\n kind, basestring):\n kind = str\n if not issubclass(kind, collections.Iterable) or kind in (\n str, unicode, basestring):\n api['scalars'].add(a)\n else:\n api['vectors'].add(a)\n\n self.attributes = sorted(api['attributes'])\n\n for scalar in api['scalars']:\n\n value = partial(\n lambda self, scalar, *args, **kwargs: self._scalar(\n scalar, *args, **kwargs), self, scalar)\n setattr(self, scalar, value)\n\n value.any = value\n\n value.all = partial(\n lambda self, scalar, *args: self._scalar(\n scalar, *args, mode='all'), self, scalar)\n\n value.none = partial(\n lambda self, scalar, *args: self._scalar(\n scalar, *args, mode='none'), self, scalar)\n\n value.only = partial(\n lambda self, vector, *args: self._scalar(\n scalar, *args, mode='only'), self, scalar)\n\n for vector in api['vectors']:\n\n value = partial(\n lambda self, vector, *args, **kwargs: self._vector(\n vector, *args, **kwargs), self, vector)\n setattr(self, vector, value)\n\n value.any = value\n\n value.all = partial(\n lambda self, vector, *args: self._vector(\n vector, *args, mode='all'), self, vector)\n\n value.none = partial(\n lambda self, vector, *args: self._vector(\n vector, *args, mode='none'), self, vector)\n\n value.only = partial(\n lambda self, vector, *args: self._vector(\n vector, *args, mode='only'), self, vector)\n\n for line in self:\n for attribute, default in api['defaults'].items():\n if not hasattr(line, attribute):\n try:\n klass_default = getattr(LogKlass, attribute)\n setattr(line, attribute, klass_default)\n except:\n setattr(line, attribute, default)\n\n return Log", "def _call_api(self):\n raise NotImplementedError", "def 
extend_dataclass(_cls=None):\n\n def wrap(cls):\n return _process_class(cls)\n\n if _cls is None:\n return wrap\n return wrap(_cls)", "def __mutate(self, ns, bases):\n\n # TODO: Identify by more than just __qualname__ in case of name change\n for cls in self.old_classes:\n if cls.__qualname__ == ns.get('__qualname__'):\n mutate_class(cls, ns, bases)\n\n return cls", "def proxy_class(remote_class, name=None):\n if isinstance(remote_class, type) and issubclass(remote_class, Module):\n rcls = remote_class\n remote_class = rcls.__name__\n else:\n rcls = get_class(remote_class)\n if name is None:\n name = rcls.__name__\n\n for proxycls in PROXY_CLASSES:\n if issubclass(rcls, proxycls.__bases__[-1]):\n # avoid 'should not be redefined' warning\n proxycls.accessibles = {}\n break\n else:\n raise ConfigError(f'{remote_class!r} is no SECoP module class')\n\n attrs = rcls.propertyDict.copy()\n\n for aname, aobj in rcls.accessibles.items():\n if isinstance(aobj, Parameter):\n pobj = aobj.copy()\n # we have to set own properties of pobj to the inherited ones\n # this matters for the ProxyModule.status datatype, which should be\n # overidden by the remote class status datatype\n pobj.ownProperties = pobj.propertyValues\n pobj.merge({'needscfg': False})\n attrs[aname] = pobj\n\n def rfunc(self, pname=aname):\n value, _, readerror = self._secnode.getParameter(self.name, pname, True)\n if readerror:\n raise readerror\n return value\n\n attrs['read_' + aname] = rfunc\n\n if not pobj.readonly:\n\n def wfunc(self, value, pname=aname):\n value, _, readerror = self._secnode.setParameter(self.name, pname, value)\n if readerror:\n raise readerror\n return value\n\n attrs['write_' + aname] = wfunc\n\n elif isinstance(aobj, Command):\n cobj = aobj.copy()\n\n def cfunc(self, arg=None, cname=aname):\n return self._secnode.execCommand(self.name, cname, arg)\n\n attrs[aname] = cobj(cfunc)\n\n else:\n raise ConfigError(f'do not now about {aobj!r} in {remote_class}.accessibles')\n\n return type(name+\"_\", (proxycls,), attrs)", "def _override(self, name, obj):\n path = name.split('.')\n assert len(path) > 1, 'module name not provided'\n obj_name = path[-1]\n\n objs = self._resolvePath(path[:-1])\n container = objs[-1]\n try:\n original_class = getattr(container, obj_name, None)\n setattr(container, obj_name, obj)\n self._original.append((container, obj_name, original_class))\n except TypeError:\n # We have a static class; we will have to modify its container.\n # This works for global functions in gtk too because their\n # container is an ordinary python module (fake_gtk).\n name = container.__name__\n prev_container = objs[-2]\n subclass = type(name, (container, ), {obj_name: obj})\n setattr(prev_container, name, subclass)\n self._original.append((prev_container, name, container))", "def _import_api(self):\n resources = __import__('surveygizmo.api', globals(), locals(), ['*'])\n\n for resource_name in resources.__all__:\n resource = getattr(resources, resource_name)\n\n if issubclass(resource, base.Resource):\n self._resources[resource_name.lower()] = resource(self, self.config)", "def hookup(self, api):\n\n # assert not hasattr(api, self.module_name), \"\"\"\n # '{}' conflicts with existing attribute\n # \"\"\".format(self.module_name)\n\n self.api = api\n if not hasattr(api, self.module_name):\n setattr(api, self.module_name, self.execute)", "def build_from_api(cls, api: BasicKrakenExAPIPublicMethods):\n # TODO: check if currencies preloaded!\n if not Currency.all_symbols():\n Currency.build_from_api(api)\n\n pairs = 
api.get_asset_pairs()\n for symbol, info in pairs.items():\n base_cur = Currency.find(info[\"base\"])\n quote_cur = Currency.find(info[\"quote\"])\n _ = cls(\n symbol=symbol,\n altname=info[\"altname\"],\n name=info.get(\"wsname\", info[\"altname\"]),\n base=base_cur,\n quote=quote_cur,\n ordermin=info.get(\"ordermin\", None),\n pair_decimals=info[\"pair_decimals\"],\n )", "def __init__(self, *args):\n super(Base, self).__init__()", "def register_js_api(self, js_api_cls:BrowserAPI):\n if js_api_cls is not None:\n js_api = js_api_cls(self.__browser)\n self.__bindings.SetObject(js_api.__class__.__name__, js_api)\n self.__bindings.Rebind()", "def api_obj(self) -> flask_restful.Api:\n\n return self.api", "def api_get_class():\n query_parameters = request.args\n crn = query_parameters.get('id')\n course_name = query_parameters.get('course_name')\n return jsonify(get_classes(crn, course_name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checks if the user is authorized for a specific method
def isAuthorized(self, func, user):
    if user.isAdmin():
        return True
    elif func in perm_map and user.hasPermission(perm_map[func]):
        return True
    else:
        return False
[ "def check_authorization(self):\n pass", "def authorize(self):\n return True", "def authorize(self, resource, **kwargs):\n method = request.method.lower()\n\n if hasattr(self, method):\n func = getattr(self, method)\n else:\n func = self.default\n\n if not func(resource, **kwargs):\n raise Forbidden('Access Denied')", "def is_authorized(self) -> bool:\n return False", "def has_permission(self, request, view):\n if view.action == 'create': #creating user. Anyone can register\n return True\n elif request.user.is_superuser: #superusers are allowed free access\n return True\n elif view.action in ['retrieve','update','destroy']:\n # action is GET PUT or DELETE and user is not superuser.\n # PUT and DELETE are relegated to object permissions\n # if GET is access to detail, relegate to object permissions, if GET is access to listing then not allow\n return True \n else: \n return False", "def check_authorization(self):\n self.token", "def authorize(\n user: Users, allowed_roles: List[str], resource: str, method: str\n) -> bool:\n db_user = Users.find_by_email(user.email)\n\n # User hasn't registered yet.\n if not db_user:\n # Although the user doesn't exist in the database, we still\n # make the user's identity data available in the request context.\n _set_current_user(user)\n\n # User is only authorized to create themself.\n if resource == \"self\" and method == \"POST\":\n return True\n\n raise Unauthorized(f\"{user.email} is not registered.\")\n\n _set_current_user(db_user)\n\n db_user.update_accessed()\n\n # User is registered but disabled.\n if db_user.disabled:\n # Disabled users are not authorized to do anything but access their\n # account info.\n if resource == \"self\" and method == \"GET\":\n return True\n\n raise Unauthorized(f\"{db_user.email}'s account is disabled.\")\n\n # User is registered but not yet approved.\n if not db_user.approval_date:\n # Unapproved users are not authorized to do anything but access their\n # account info.\n if resource == \"self\" and method == \"GET\":\n return True\n\n raise Unauthorized(f\"{db_user.email}'s registration is pending approval\")\n\n # User is approved and registered, so just check their role.\n if allowed_roles and db_user.role not in allowed_roles:\n raise Unauthorized(\n f\"{db_user.email} is not authorized to access this endpoint.\"\n )\n\n return True", "def can_request_assistance(user):\n return _is_in_acl(user, 'authorized')", "def _check_method(cls, allowed_methods=VALID_METHODS):\n if cherrypy.request.method.upper() not in allowed_methods:\n cherrypy.response.headers['Allow'] = (', ').join(allowed_methods)\n raise cherrypy.HTTPError(405)", "def post_authorization(self):\n pass", "def _is_client_authorized(self, client_id):\n if client_id is None or client_id not in settings.ALLOWED_FOR_GET:\n self.logger.error(\"%s does not have permission to view summaries\",\n client_id)\n return False\n self.logger.info(\"Authorizing user request\")\n return True", "def handle_missing_authorization(self, *args, **kwargs):\n return False", "def run_endpoint_checks(session, g, request, function):\n if is_route_login_required(function):\n if not user_is_logged_in(session, g, request):\n return False\n\n #TODO: Check security level.\n\n return True", "def has_permission(self, request, view):\n\n if not request.user.is_authenticated:\n return False\n\n if request.method == 'GET':\n if hasattr(request.user, 'profile') or hasattr(request.user, 'driver_profile') or hasattr(request.user,\n 'shop_profile'):\n return True\n\n if request.method == 
'POST':\n if hasattr(request.user, 'profile'):\n return True\n\n if request.method == 'PATCH':\n if hasattr(request.user, 'driver_profile'):\n return True\n\n return False", "def token_authorized(method):\n def check_token(self, *args, **kwargs):\n auth_header = self.request.headers.get('Authorization', '')\n match = auth_header_pat.match(auth_header)\n if not match:\n raise web.HTTPError(403)\n token = match.group(1)\n db_token = self.db.query(orm.APIToken).filter(orm.APIToken.token == token).first()\n if db_token is None:\n raise web.HTTPError(403)\n return method(self, *args, **kwargs)\n check_token.__name__ = method.__name__\n check_token.__doc__ = method.__doc__\n return check_token", "def authorize_actuator(self, session, uri, action): # pylint: disable=W0613\n account = self._session.query(Account).filter(\n Account.username == session['authid']).one()\n actuator = self._session.query(Actuator).filter(\n Actuator.account_username == account.username).one()\n\n has_right = re.findall(\n r'^io\\.otoroshi\\.actuator\\.{}\\.(?:.+)\\.(high|low|toggle)$'.format(\n actuator.account_username), uri)\n if len(has_right) > 0:\n print(\"Actuator {} successfully registered {} procedure\".format(\n actuator, uri))\n return True\n\n print(\"Actuator {} failed to register {} procedure\".format(\n actuator, uri))\n return False", "def check_user_access(user, model_class, action, *args, **kwargs):\n for access_class in access_registry.get(model_class, []):\n access_instance = access_class(user)\n access_method = getattr(access_instance, 'can_%s' % action, None)\n if not access_method:\n continue\n result = access_method(*args, **kwargs)\n logger.debug('%s.%s %r returned %r',\n access_instance.__class__.__name__,\n access_method.__name__, args, result)\n if result:\n return result\n return False", "def pre_authorization(self):\n pass", "def is_authorized(user, allowed):\n if user not in allowed:\n raise RuntimeError(\"{} not authorized\".format(user))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a click at the coordinates of top_left on the website opened by the driver
def clickScreen(driver, top_left):
    try:
        #myElem = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.CLASS_NAME, 'game')))
        game_element = driver.find_element_by_class_name("game")
        myElem = game_element
        action = webdriver.common.action_chains.ActionChains(driver)
        action.move_to_element_with_offset(myElem, top_left[0] + 50, top_left[1] + 50)
        action.click()
        action.perform()
        #print("Action Performed!")
    except TimeoutException:
        print("Loading took too much time!")
[ "def click_here(png_image, window_region):\n loc = pyautogui.locateOnScreen(\n png_image, grayscale=True, region=(window_region))\n if loc:\n loc_cent = pyautogui.center(loc)\n pyautogui.click(loc_cent)\n else:\n pyautogui.alert(text=\"Could not locate action to perform.\",\n title='DE-Automation', button='OK', root=None,\n timeout=None)\n sys.exit()\n return loc", "def _scroll_into_view(self,locator):\n element = self.selenium.get_webelement(locator)\n self.selenium.driver.execute_script(\"arguments[0].scrollIntoView({behavior: 'auto', block: 'center', inline: 'center'})\", element)", "def scroll_to_click(element):\n scroll_to(element)\n click(element)", "def click_status_and_search():\n try_click_image(IMG_STATUS)\n pyautogui.scroll(-7000)\n try_click_image(IMG_SZUKAJ)", "def MoveMouse(self):\n self._helper.Browser.Actions.move_to_element(self.wrappedElement).perform()", "def click_retry_with_elem_coordinates(driver, element):\n try:\n element.click()\n except WebDriverException as e:\n print(\"Element couldn't be clicked, error: '{}'\".format(e))\n h, w = PlatformBase.get_element_location(element)\n print(\"Will try to tap the element location w:{}, h:{}\".format(w, h))\n return driver.tap([(w, h)])", "def click(self):\n self.logger.info('clicking on page object {}'.format(self._log_id_short))\n self.logger.debug('clicking on page object; {}'.format(self._log_id_long))\n self.webelement.click()\n self.logger.info('successfully clicked on page object {}'.format(self._log_id_short))\n self.logger.debug('successfully clicked on page object; {}'.format(self._log_id_long))\n return self", "def scroll_to_top(self):\n\t\tself.driver.execute_script(\"window.scrollTo(0, 0)\")\n\t\ttime.sleep(.4)", "def click_home(self):\n self.find_element_by_xpath(self.home_xpath).click()", "def test_scroll_to_top(instagram):\n instagram.scroll_to_top()\n\n assert instagram.driver.execute_script.called\n assert \"scrollTo(0, 0)\" in str(instagram.driver.execute_script.call_args)", "def click(self, locator):\r\n self.find_element(locator).click()", "def go_to(lat, lng):\n text = '{},{}'.format(lat, lng)\n\n # Click on text input\n xdo.move_mouse(150, 125)\n xdo.click_window(win_id, 1)\n xdo.enter_text_window(win_id, text.encode('utf8'))\n\n sleep(0.5)\n\n # Click search button\n # move_reulative segfaults for some reason\n # xdo.move_mouse_relative_to_window(win_id, 50, 50)\n xdo.move_mouse(350, 125)\n sleep(0.25)\n xdo.click_window(win_id, 1)\n\n # Hide yellow pin\n xdo.move_mouse(375, 400)\n xdo.click_window(win_id, 1)\n xdo.move_mouse(375, 450)\n xdo.click_window(win_id, 1)", "def left_click(self):\n self.node.left_click()", "def focus(self):\n hover = ActionChains(self.driver).move_to_element(self._find_element())\n hover.click()\n hover.perform()", "def random_click(self, *loc):\n start_time = time.time()\n try:\n logger.info(\"Random click the element <{0} -> {1}>, Spend {2}\\\n seconds \".format(loc[0], loc[1], time.time() - start_time))\n return self.find_elements(*loc).click()\n except Exception:\n logger.warning(\"No element found, click failure, Spend {0} seconds\".format(time.time()-start_time))\n raise", "def _getDocOrigin(seleniumDriver):\n \n # this is the border width of the OS window\n border = seleniumDriver.execute_script(\"return (window.outerWidth - window.innerWidth)/2;\")\n \n # Assuming the window border is homogeneous and there is nothing in\n # the bottom of the window (firebug or something like that)\n menuHeight = seleniumDriver.execute_script(\"return 
(window.outerHeight-window.innerHeight) - %s*2;\"%border)\n \n absX = seleniumDriver.execute_script(\"return window.screenX + %s;\"%border)\n absY = seleniumDriver.execute_script(\"return window.screenY + %s + %s;\"%(border, menuHeight))\n \n return absX, absY", "def hover_and_click(self, locator_hover, locator_click):\r\n ActionChains(self.driver).move_to_element(self.find_element(locator_hover)).perform()\r\n self.click(locator_click)", "def elements_click(self, *loc, index):\n start_time = time.time()\n try:\n logger.info(\"Click the element <{0} -> {1}>, Spend {2} \\\n seconds\".format(loc[0], loc[1], time.time() - start_time))\n return self.find_elements('normal', *loc)[index].click()\n except Exception:\n logger.warning(\"Click the element <{0} -> {1}>, Spend {2} \\\n seconds\".format(loc[0], loc[1], time.time() - start_time))\n raise", "def flick(self, start_x, start_y, end_x, end_y):\n self._selenium_web_driver().flick(start_x, start_y, end_x, end_y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a binary image thresholded by the RGB pixel values given in rbg_threshold, i.e. assign 1 if the pixel is > the threshold and 0 if the pixel is < the threshold
def colorThreshold(img, rbg_threshold = (60,60,60)):
    temp = np.zeros(img.shape)
    rflags_h = img[:,:]>rbg_threshold[0]
    temp[:,:][rflags_h] = 1
    return temp
[ "def brightness_binary(self) -> np.ndarray:\n thresholds = self.thresholds\n lab = cv2.cvtColor(self.image, cv2.COLOR_BGR2Lab)\n b_channel = lab[:, :, 2]\n b_binary = np.zeros_like(b_channel)\n b_binary[\n (b_channel >= thresholds.brightness[0]) &\n (b_channel <= thresholds.brightness[1])\n ] = 1\n return b_binary", "def apply_binary(image):\n img_out = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \\\n cv2.THRESH_BINARY, 5, 2)\n return img_out", "def grey_to_binary(img, thresh):\r\n thresh = thresh\r\n fn = lambda x: 255 if x>thresh else 0\r\n r = img.point(fn, mode=\"1\")\r\n# r.save('foo.png')\r\n print(\"convert mode from grey to binary\")\r\n return r", "def binarize(self, thresh = 127, maxv = 255, blocksize = 3, p = 5):\n if (is_tuple(thresh)):\n r = self.getEmpty(1) \n g = self.getEmpty(1)\n b = self.getEmpty(1)\n cv.Split(self.getBitmap(), b, g, r, None)\n \n \n cv.Threshold(r, r, thresh[0], maxv, cv.CV_THRESH_BINARY)\n cv.Threshold(g, g, thresh[1], maxv, cv.CV_THRESH_BINARY)\n cv.Threshold(b, b, thresh[2], maxv, cv.CV_THRESH_BINARY)\n \n \n cv.Add(r, g, r)\n cv.Add(r, b, r)\n \n \n return Image(r, colorSpace=self._colorSpace)\n \n \n elif thresh == -1:\n newbitmap = self.getEmpty(1)\n cv.AdaptiveThreshold(self._getGrayscaleBitmap(), newbitmap, maxv,\n cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv.CV_THRESH_BINARY_INV, blocksize, p)\n return Image(newbitmap, colorSpace=self._colorSpace)\n else:\n newbitmap = self.getEmpty(1) \n #desaturate the image, and apply the new threshold \n cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV)\n return Image(newbitmap, colorSpace=self._colorSpace)", "def lightness_binary(self) -> np.ndarray:\n thresholds = self.thresholds\n lab = cv2.cvtColor(self.image, cv2.COLOR_BGR2LAB)\n l_channel = lab[:, :, 0]\n l_binary = np.zeros_like(l_channel)\n l_binary[\n (l_channel >= thresholds.lightness[0]) &\n (l_channel <= thresholds.lightness[1])\n ] = 1\n return l_binary", "def threshold_binary(heatmap: np.ndarray, threshold: float) -> np.ndarray:\n arr = np.zeros_like(heatmap, dtype=np.bool)\n arr[np.where(heatmap >= threshold)] = True\n return arr", "def bwPic(self):\n #converting photo to grayscale\n self.gray_image = rgb2gray(self.image)\n #set threshold based on otsu algorithm (if above threshold, array set to 1, otherwise 0 creating black-and-white)\n self.th = threshold_otsu(self.gray_image)\n #create new image \n self.binary = self.gray_image > self.th\n #plots image\n plt.imshow(self.binary,cmap=plt.cm.gray)\n #debugging logger message\n logger.debug(f\"converted image to BW ...threshold...\")\n #returns it\n #return self.binary", "def color_threshold(img, s_thresh=(90, 255)):\n # Some other factors to consider 170 255\n # Convert to HLS color space and separate the V channel\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:, :, 2]\n # l_channel = hls[:, :, 1] #TODO (ivan) consider this in future improvements\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n return s_binary", "def get_HSV_threshold_binary(img, hsv_thresholds=_HSV_YW_THRESHOLDS):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n if hsv_thresholds is not None and len(hsv_thresholds) == 4:\n yellow_dark = hsv_thresholds[0]\n yellow_light = hsv_thresholds[1]\n white_dark = hsv_thresholds[2]\n white_light = hsv_thresholds[3]\n else:\n yellow_dark = np.array([15, 127, 127], dtype=np.uint8)\n yellow_light = np.array([25, 255, 
255], dtype=np.uint8)\n white_dark = np.array([0, 0, 200], dtype=np.uint8)\n white_light = np.array([255, 30, 255], dtype=np.uint8)\n\n yellow_range = cv2.inRange(img, yellow_dark, yellow_light)\n white_range = cv2.inRange(img, white_dark, white_light)\n\n yellows_or_whites = yellow_range | white_range\n img = cv2.bitwise_and(img, img, mask=yellows_or_whites)\n\n return np.uint8(np.sum(img, axis=2, keepdims=False) > 0)", "def combined_threshold(img, color_space='BGR'):\n\ts_binary = s_threshold(img, color_space)\n\tsober_x_binary = gradient_threshold(img, color_space)\n\t# Stack each channel to view their individual contributions in green and blue respectively\n\t# This returns a stack of the two binary images, whose components you can see as different colors\n\t# color_binary = np.dstack((np.zeros_like(sober_x_binary), sober_x_binary, s_binary))\n\t# cv2.imshow('', color_binary)\n\t# cv2.waitKey(10000)\n\n\t# Combine the two binary thresholds\n\tcombined_binary = np.zeros_like(sober_x_binary)\n\tcombined_binary[(s_binary == 255) | (sober_x_binary == 255)] = 255\n\t# cv2.imshow('', combined_binary)\n\t# cv2.waitKey(10000)\n\treturn combined_binary", "def vertex_binary_color(binary: np.ndarray, x: int, y: int, r: float, r_factor: float, threshold: float) -> int:\n fill_ratio = circle_fill_ratio(binary, x, y, int(r * r_factor))\n if fill_ratio >= threshold:\n return 255\n else:\n return 0", "def binarize_image(tile, im_nuclei_stain, foreground_threshold, local_radius_ratio=3, minimum_radius = 3):\n\n ## Apply initial global threshold\n img = cv2.cvtColor((im_nuclei_stain),cv2.COLOR_GRAY2RGB)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray_flat = img_gray.flatten()\n thresh = np.round(threshold_otsu(img_gray_flat[img_gray_flat<foreground_threshold]))\n img_bin = np.copy(img_gray)\n img_bin[img_gray<thresh] = 255\n img_bin[img_gray>=thresh] = 0\n\n ## Fill small holes in the image\n img_bin = binary_fill_holes(img_bin.astype(bool))\n img_bin = img_bin.astype(np.uint8)\n\n ## Remove small structures in the image based on minimum_radius\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(minimum_radius,minimum_radius))\n opening = cv2.morphologyEx(img_bin,cv2.MORPH_OPEN, kernel, iterations = 1)\n\n ## Identify connected regions(\"components\") in the image\n regions = cv2.connectedComponents(opening)[1]\n obj_props = regionprops(regions, intensity_image=im_nuclei_stain)\n\n ## Initialize mask\n im_fgnd_mask = np.zeros(im_nuclei_stain.shape).astype(np.uint8)\n\n ## Iterate through regions found via global thresholding\n for obj in obj_props:\n\n # Skip thresholding on background component\n if (obj.label == 0):\n continue\n\n # Expand bounding box based on local_radius_ratio\n # The idea is to include more background for local thresholding.\n bbox = obj.bbox\n equivalent_diameter = obj.equivalent_diameter\n min_row = np.max([0, np.round(bbox[0] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_row = np.min([tile.shape[0], np.round(bbox[2] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n min_col = np.max([0, np.round(bbox[1] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_col = np.min([tile.shape[1], np.round(bbox[3] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n region = im_nuclei_stain[min_row:max_row, min_col:max_col]\n region_flat = region.flatten()\n\n # If local threshold fail. 
Default to global threshold instead.\n try:\n thresh = np.round(threshold_otsu(region_flat[region_flat<foreground_threshold]))\n except:\n thresh = foreground_threshold\n\n # Copy local bbox mask to larger tile mask\n region_bin = np.copy(region)\n region_bin[region<thresh] = 1\n region_bin[region>=thresh] = 0\n im_fgnd_mask[min_row:max_row, min_col:max_col] = im_fgnd_mask[min_row:max_row, min_col:max_col] + region_bin.astype(np.uint8)\n im_fgnd_mask[im_fgnd_mask>0] = 1\n\n return(im_fgnd_mask)", "def hls_select(img, thresh=(170, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n return threshold(binary_output)", "def saturation_binary(self) -> np.ndarray:\n thresholds = self.thresholds\n hls = cv2.cvtColor(self.image, cv2.COLOR_BGR2HLS)\n s_channel = hls[:, :, 2]\n s_binary = np.zeros_like(s_channel)\n s_binary[\n (s_channel >= thresholds.saturation[0]) &\n (s_channel <= thresholds.saturation[1])\n ] = 1\n return s_binary", "def create_mask(img, thresh, max_val):\r\n _, mask_ret = cv2.threshold(img, thresh, max_val, cv2.THRESH_BINARY)\r\n return mask_ret", "def threshold_img(data, threshold, mask=None, mask_out='below'):\n if mask is not None:\n mask = threshold_img(mask, threshold, mask_out=mask_out)\n return data * mask.astype(bool)\n if mask_out.startswith('b'):\n data[data < threshold] = 0\n elif mask_out.startswith('a'):\n data[data > threshold] = 0\n return data", "def make_binary_grid(\n grid: np.ndarray, threshold: float, negate: bool = False\n) -> np.ndarray:\n if not negate:\n grid = 255 - grid\n return (grid / 255) > threshold", "def _perform_threshold(self, img):\r\n img = cv2.GaussianBlur(img,\r\n ksize=(5, 5),\r\n sigmaX=0)\r\n return cv2.adaptiveThreshold(\r\n img,\r\n maxValue=255,\r\n adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\r\n thresholdType=cv2.THRESH_BINARY_INV,\r\n blockSize=7,\r\n C=7\r\n )", "def binarize_preds(predictions: torch.Tensor, threshold=0.5) -> torch.Tensor:\n return predictions.__ge__(threshold).int()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the optimal clicks for the given amt
def setAmountV2(driver, amt, amount_dict, board_coord):
    key_lst =optimizeAmtClick(amt);
    for k in key_lst:
        clickScreen(driver,amount_dict['amt_region'])
        clickScreen(driver,amount_dict[k])
        clickScreen(driver,board_coord)
[ "def approachTarget(self, amount):\n if amount == 0:\n # If amount is zero, do nothing.\n return\n \n if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:\n # If 'self.approachTarget()' will not take the view within twice the\n # tolerance distance, approach the target by given amount:\n self.p = self.p.add(self.t.sub(self.p).scale(amount))", "def foregone_utility(self, item, j, action):\n\n if action == 0:\n bid = self.strategies[item][action][j]\n # Return 0 for case where bid exceeds the valuation of item to be bought\n if bid <= self.curr_best_offer[item] or self.curr_units[item] >= self.max_holdings[item] or \\\n bid > self.get_val(self.add_one(item)):\n return 0\n else:\n return self.get_val(self.add_one(item)) - self.curr_best_offer[item]\n else:\n offer = self.strategies[item][action][j]\n # Return 0 for case where the offer is lower than the valuation of item to be sold\n if offer >= self.curr_best_bid[item] or self.curr_units[item] <= self.min_holdings[item] or \\\n offer < self.get_val(self.curr_units):\n return 0\n else:\n return self.curr_best_bid[item] - self.get_val(self.curr_units)", "async def buypack( self, ctx, amt: int = 1 ):\n #Just make sure they can\n if amt < 1:\n await ctx.send(\"Invalid input.\" )\n return\n if not await bank.can_spend(ctx.author, amt * config.PACK_PRICE):\n await ctx.send(\"Not enough money for {amt} packs. They are currently ${pkpr} each, coming to a total of ${totalcost}, whereas you have ${bal}.\".format(\n amt=amt,\n pkpr=config.PACK_PRICE,\n totalcost=amt*config.PACK_PRICE,\n bal=await bank.get_balance(ctx.author)\n ))\n return\n #Data stuff, then printing\n grantPacks( ctx.author, amt )\n grantMoney( ctx.author, -1 * amt * config.PACK_PRICE )\n if amt == 1:\n await ctx.send( \"Bought a pack! Open it with =openpack.\" )\n else:\n await ctx.send( \"Bought \" + str(amt) + \" packs!!!! 
Open them with =openpack!!!!!!!\" )", "def buy_in(t, amt):\n for player in t.players:\n place_bet(amt, player, t.pot)", "def hunt(self, trials=10000, sleep_time=0.1):\n num_runs = 0\n pre_arbitrage_assets = self.load_arbitrage_assets()\n time.sleep(sleep_time)\n while(num_runs < trials):\n try:\n self.update_orderbook()\n except ConnectionError as e:\n print(e + \"will suspend bot for 10 seconds\")\n time.sleep(10)\n continue\n #Search for inefficiency\n orderbook_btc = self.orderbook_btc_eth(self.orderbook)\n orderbook_eth = self.orderbook_eth_btc(self.orderbook)\n if(orderbook_btc[0][1] - (self.fee * orderbook_btc[0][1]) > self.bit_rate['btc_one'] and\n orderbook_eth[0][1] - (self.fee * orderbook_eth[0][1]) > float(self.bit_rate['askPrice'])): \n #print('found' + orderbook_btc[0][0] + orderbook_eth[0][0] + str(num_runs))\n num_runs += 1\n purchase = []\n for k in self.orderbook:\n if(list(k.keys())[0] == orderbook_btc[0][0]):\n purchase.insert(0, k)\n if(list(k.keys())[0] == orderbook_eth[0][0]):\n purchase.insert(1, k)\n btc_limit = binance_config.btc_trade_limit\n while(btc_limit > 0.001):\n if(self.determine_feasibility(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit) is True):\n self.execute_trade(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit)\n break\n else:\n btc_limit = btc_limit - 0.001\n num_runs += 1\n if(num_runs % 100 == 0):\n print(str(num_runs))\n post_arbitrage_assets = self.load_arbitrage_assets()\n \n #Print results\n time_delta = datetime.datetime.now().replace(microsecond=0) - pre_arbitrage_assets['datetime'] \n print('Initial: BTC:', pre_arbitrage_assets['BTC'],'ETH:', pre_arbitrage_assets['ETH'], 'BNB:', pre_arbitrage_assets['BNB'])\n print('After__: BTC:', post_arbitrage_assets['BTC'],'ETH:', post_arbitrage_assets['ETH'], 'BNB:', post_arbitrage_assets['BNB'])\n print('Diff___: BTC:', float(post_arbitrage_assets['BTC'])-float(pre_arbitrage_assets['BTC']),\n 'ETH:', float(post_arbitrage_assets['ETH'])-float(pre_arbitrage_assets['ETH']),\n 'BNB:', float(post_arbitrage_assets['BNB'])-float(pre_arbitrage_assets['BNB']),\n 'TIME:', divmod(time_delta.total_seconds(), 60))", "def _clickthrough_rate(impressions, clicks):\n if impressions:\n return (float(clicks) / impressions) * 100.\n else:\n return 0", "def _disburse( self, amount ):\n for x in self.active:\n for y in x:\n c, source_classifiers = y\n c.pay( amount / float(len(self.active)) )\n for z in source_classifiers:\n z.pay( amount / float(len(source_classifiers)) )", "def click_rate(self):\n if self.click_recipient_count > 0 and self.recipient_details_count > 0:\n return round(float(self.click_recipient_count) / float(self.recipient_details_count) * 100, 2)\n\n return 0", "def bid_algorithm(budget_left, auction_id, last_bid, won, price_paid, last_two_aves,high_bid_warning, high_bid_count ):\n \n if high_bid_warning:\n if high_bid_count < 10:\n high_bid_count+=1\n return 0\n else:\n high_bid_count = 0\n high_bid_warning = False\n random_seed= random.random()\n if random_seed < 0.06 and random_seed > 0:\n bid_amount = random.random() * 200 + 400\n if bid_amount < budget_left:\n return bid_amount\n bid_amount = 0\n if not won:\n diff_slot_1 = last_two_aves[0][0] - last_two_aves[1][0]\n diff_slot_2 = last_two_aves[0][1] - last_two_aves[1][1]\n diff_slot_3 = last_two_aves[0][2] - last_two_aves[1][2]\n if diff_slot_1 <= 0 and last_two_aves[0][0] + 2 < budget_left:\n bid_amount = last_two_aves[0][0] + 1 + random.random()\n elif diff_slot_2 <= 0 and last_two_aves[0][1] + 2 < budget_left:\n 
bid_amount = last_two_aves[0][1] + 1 + random.random()\n elif diff_slot_3 <= 0 and last_two_aves[0][2] + 2 < budget_left:\n bid_amount = last_two_aves[0][2] + 1 + random.random()\n else:\n if diff_slot_1 >= 2* last_two_aves[1][0]:\n high_bid_warning = True\n return 0\n if last_two_aves[0][2] < budget_left:\n bid_amount = last_two_aves[0][2]\n # print(bid_algorithm)\n else:\n bid_amount = 0\n elif won and price_paid + 2 < budget_left:\n bid_amount = price_paid + 1 + random.random()\n elif auction_id in range(auction_id- (int) (auction_id/4), auction_id +1):\n bid_amount = budget_left/5\n else:\n bid_amount = budget_left\n# print(bid_amount)\n return bid_amount", "def pay_cost(self, source, amount: int) -> int:\n\t\tif self.spells_cost_health and source.type == CardType.SPELL:\n\t\t\tself.log(\"%s spells cost %i health\", self, amount)\n\t\t\tself.game.queue_actions(self, [Hit(self.hero, amount)])\n\t\t\treturn amount\n\t\tif self.temp_mana:\n\t\t\t# Coin, Innervate etc\n\t\t\tused_temp = min(self.temp_mana, amount)\n\t\t\tamount -= used_temp\n\t\t\tself.temp_mana -= used_temp\n\t\tself.log(\"%s pays %i mana\", self, amount)\n\t\tself.used_mana += amount\n\t\treturn amount", "def click(self, event):\r\n point = [event.x, event.y]\r\n for j in range(len(self.balls)):\r\n if j >= len(self.balls):\r\n break\r\n ball = self.balls[j]\r\n if ball.click(point):\r\n self.balls.pop(j)\r\n self.cnt -= 1\r\n self.score += 1\r\n while self.cnt < self.n:\r\n self.add_ball()", "def grind_money_loop(self, loopnum, search_on_off, memes_on_off, trivia_on_off=False, give_on_off=False, give_user=\"\",\r\n priority_search=\"net gain\", hunt_on_off=False, rob_on_off=False, user_to_rob=\"\"):\r\n beg_time=self.stable_beg()\r\n if search_on_off:\r\n search_time=self.stable_search(priority_search)\r\n if memes_on_off:\r\n meme_time=self.stable_postmeme()\r\n\r\n hunt_time=self.stable_hunt()\r\n if trivia_on_off==True:\r\n trivia_time=self.stable_trivia()\r\n deposit_time=perf_counter()\r\n if rob_on_off:\r\n rob_time=self.stable_rob(user=user_to_rob)\r\n\r\n\r\n total=0\r\n start=perf_counter()\r\n while perf_counter()-start<loopnum:\r\n\r\n if perf_counter()-beg_time>self.beg_cooldown+2:\r\n print(\"begging\")\r\n\r\n beg_time = self.stable_beg()\r\n total+=1\r\n\r\n if search_on_off==True:\r\n if perf_counter()-search_time>self.search_cooldown:\r\n print(\"searching\")\r\n\r\n search_time = self.stable_search(priority_search)\r\n total+=1\r\n\r\n if rob_on_off==True:\r\n if perf_counter()-rob_time>self.rob_cooldown:\r\n print(\"robbing\")\r\n\r\n rob_time=self.stable_rob(user=user_to_rob)\r\n\r\n if memes_on_off==True:\r\n if perf_counter()-meme_time>self.meme_cooldown:\r\n print(\"posting meme\")\r\n\r\n meme_time = self.stable_postmeme()\r\n total+=1\r\n if hunt_on_off==True:\r\n if perf_counter()-hunt_time>self.hunt_cooldown:\r\n print(\"hunting\")\r\n\r\n hunt_time = self.stable_hunt()\r\n total+=1\r\n\r\n if trivia_on_off==True:\r\n if perf_counter()-trivia_time>self.trivia_cooldown:\r\n print(\"triviaing\")\r\n\r\n trivia_time=self.stable_trivia()\r\n total+=1\r\n sleep(1.3)\r\n\r\n elif perf_counter()-deposit_time>100:\r\n print(\"depositing\")\r\n self.write_to_chat(\"pls deposit all\")\r\n sleep(1)\r\n deposit_time=perf_counter()\r\n\r\n if give_on_off==True:\r\n print(\"giving\")\r\n self.write_to_chat(\"pls give \"+ give_user+ \" all\")\r\n sleep(1)", "def performAction(self, action):\n self.t += 1\n # Map the action integer to a torque and displacement.\n assert round(action[0]) == action[0]\n\n if 
self.only_steer:\n T = 2 * (action[0] / 4.0 - 1.0)\n d = 0.\n else:\n # -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for\n # action in {6, 7, 8}\n torque_selector = np.floor(action[0] / 3.0) - 1.0\n T = 2 * torque_selector\n # Random number in [-1, 1]:\n p = 2.0 * np.random.rand() - 1.0\n # -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for\n # action in {2, 5, 8}\n disp_selector = action[0] % 3 - 1.0\n d = 0.02 * disp_selector + self._butt_disturbance_amplitude * p\n super(BalanceTask, self).performAction([T, d])", "def simulate_clicker(build_info, duration, strategy):\n state=ClickerState()\n build_info_clone=build_info.clone()\n count=state.get_time()\n while count <= duration :\n\n item= strategy(state.get_cookies(),state.get_cps(),duration-count,build_info_clone)\n \n if item == None :\n state.wait(duration)\n break\n \n time_required=state.time_until(build_info.get_cost(item))\n \n if (state.get_time()+time_required) > duration :\n state.set_cookies(state.get_cookies()+((duration-state.get_time())*state.get_cps()))\n state.set_time(state.get_time()+(duration-state.get_time()))\n state.set_total_cookies(state.get_cookies())\n break\n else:\n state.wait(time_required)\n \n \n flag=state.buy_item(item,build_info.get_cost(item),build_info.get_cps(item)) \n \n if flag==1:\n build_info.update_item(item)\n \n count= state.get_time()\n \n\n return state", "async def buy(self, ctx, *, auction_item: str):\n author = ctx.author\n await self._set_bank(author)\n i = 0;\n items = [item for item in self._shop[\"picitems\"] if item[\"name\"] in self.settings[\"user\"][str(author.id)][\"items\"]]\n for item2 in self._shop[\"picitems\"]:\n if item2[\"name\"].lower() == auction_item.lower():\n for item in items:\n i = i + 1\n if i >= 1:\n await ctx.send(\"You already own a pickaxe, sell your pickaxe and try again :no_entry:\")\n return\n filtered = filter(lambda x: x[\"name\"].lower() == auction_item.lower(), self._auction[\"items\"]) \n filtered = sorted(filtered, key=lambda x: x[\"price\"])\n if not filtered:\n await ctx.send(\"There is no `{}` on the auction house :no_entry:\".format(auction_item.title()))\n return\n server = ctx.guild\n channel = ctx.channel\n author = ctx.author\n \n if server.id not in PagedResultData.paged_results:\n PagedResultData.paged_results[server.id] = dict()\n \n if channel.id not in PagedResultData.paged_results[server.id]:\n PagedResultData.paged_results[server.id][channel.id] = dict()\n \n paged_result = PagedResult(filtered, lambda item: \"\\n**Name:** \" + item[\"name\"] + \"\\n**Price:** \" + str(item[\"price\"]) + \"\\n\" + (\"**Durability:** \" + str(item[\"durability\"]) + \"\\n\" if \"durability\" in item else \"\") + (\"**Amount:** \" + str(item[\"amount\"]) + \"\\n\" if \"amount\" in item else \"**Amount:** 1\"))\n paged_result.list_indexes = True\n paged_result.selectable = True\n \n async def selected(event):\n item = event.entry\n if item not in self._auction[\"items\"]:\n await channel.send(\"That item was recently bought :no_entry:\")\n return\n owner = discord.utils.get(self.bot.get_all_members(), id=int(item[\"ownerid\"]))\n if owner == ctx.message.author:\n await channel.send(\"You can't buy your own items :no_entry:\")\n return\n if item[\"price\"] > self.settings[\"user\"][str(author.id)][\"balance\"]:\n await channel.send(\"You don't have enough money for that item :no_entry:\")\n return\n self._auction[\"items\"].remove(item)\n \n self.settings[\"user\"][str(author.id)][\"balance\"] -= item[\"price\"]\n 
self.settings[\"user\"][str(owner.id)][\"balance\"] += item[\"price\"]\n \n try:\n if item[\"durability\"]:\n self.settings[\"user\"][str(author.id)][\"pickdur\"] = item[\"durability\"]\n except:\n pass\n \n try:\n if item[\"amount\"]:\n pass\n except:\n item[\"amount\"] = 1\n \n for x in range(0, item[\"amount\"]):\n self.settings[\"user\"][str(author.id)][\"items\"].append(item[\"name\"].title())\n try:\n await channel.send(\"You just bought `{} {}` for **${:,}** :tada:\".format(item[\"amount\"], item[\"name\"], item[\"price\"]))\n except:\n await channel.send(\"You just bought `1 {}` for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n try:\n await owner.send(\"Your `{}` just got bought on the auction house, it was sold for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n except:\n pass\n \n dataIO.save_json(self._auction_file, self._auction)\n dataIO.save_json(self.location, self.settings)\n \n paged_result.on_select = selected\n\n message = await channel.send(embed=paged_result.get_current_page_embed())\n\n paged_result.message_id = message.id\n\n PagedResultData.paged_results[server.id][channel.id][author.id] = paged_result", "def greedy(items,maxCost,keyFunction):\n \n itemsByKey = sorted(items,key=keyFunction,reverse=True)\n \n result = []\n totalValue, totalCost = 0.0, 0.0\n \n for i in range(len(itemsByKey)):\n if (totalCost + itemsByKey[i].getCalories()) <= maxCost:\n result.append(itemsByKey[i])\n totalCost += itemsByKey[i].getCalories()\n totalValue += itemsByKey[i].getValue()\n \n return (result,totalValue)", "def update_cooling_demand(self, action: float):\n\n raise NotImplementedError", "def get_click_prob(theta, bid):\n th = theta['a'] + theta['bid'] * float(bid) + theta['0'] # TODO: really? have bid in this section????\n p_click = theta['max_click_prob'] / (1 + np.exp(-th)) # TODO: introduce more robust function\n return p_click", "def __addWinnings(self, multiplier):\n self.balance += self.bet * multiplier" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of coordinates of different board locations
def getAllBoardCoord(driver):
    board_list = ['hi', 'mid', 'lo']
    board_dict = {}
    for b in board_list:
        tmp = getTemplate(b)
        game_image = getGameImage(driver, 'layer2')
        board_coord = detectTemplate(game_image, tmp, False, -1)
        board_dict[b] = board_coord
    return board_dict
[ "def ps_moves_per_loc():\n _dict = {}\n for i in range(8):\n for j in range(8):\n _dict[str((i, j))] = {}\n set_2nd_layer_keys_moves(_dict)\n return _dict", "def world_cups():\n return [(\"Germany\", 2006, \"Italy\"), (\"South-Africa\", 2010, \"Spain\"), (\"Brazil\", 2014, \"Germany\")]", "def get_board_dict(self):\n return {p: self.as_string(p) for p in board.positions()}", "def get_dict_of_moves_with_cost(self):\n dict_of_costs_with_moves = {}\n zero_is_in_corner = self.is_empty_in_corner()\n y_coordinate, x_coordinate = np.where(self.state == 0)\n y_coordinate = y_coordinate[0]\n x_coordinate = x_coordinate[0]\n\n y_length, x_length = self.state.shape\n\n for y_move, x_move in always_available_moves:\n if y_move != 0:\n # checks if you go out of bounds of y axis\n if y_move + y_coordinate >= y_length or y_move + y_coordinate < 0:\n continue\n if x_move != 0:\n # checks if you go out of bounds of x axis\n if x_move + x_coordinate >= x_length or x_move + x_coordinate < 0:\n # wraps around x axis\n if zero_is_in_corner:\n if 2 in dict_of_costs_with_moves:\n dict_of_costs_with_moves[2].append([y_move, x_move])\n else:\n dict_of_costs_with_moves[2] = [[y_move, x_move]]\n continue\n\n if 1 in dict_of_costs_with_moves:\n dict_of_costs_with_moves[1].append([y_move, x_move])\n else:\n dict_of_costs_with_moves[1] = [[y_move, x_move]]\n\n if zero_is_in_corner:\n is_in_top_left = y_coordinate == 0 and x_coordinate == 0\n is_in_top_right = y_coordinate == 0 and x_coordinate == x_length - 1\n is_in_bottom_left = y_coordinate == y_length - 1 and x_coordinate == 0\n is_in_bottom_right = y_coordinate == y_length - 1 and x_coordinate == x_length - 1\n\n if is_in_top_left:\n dict_of_costs_with_moves[3] = [[1, 1], [-1, -1]]\n if is_in_bottom_left:\n dict_of_costs_with_moves[3] = [[-1, 1], [1, -1]]\n if is_in_top_right:\n dict_of_costs_with_moves[3] = [[1, -1], [-1, 1]]\n if is_in_bottom_right:\n dict_of_costs_with_moves[3] = [[-1, -1], [1, 1]]\n\n return dict_of_costs_with_moves", "def _coll_cruises(colls):\n cc = {}\n for coll in colls:\n cc[coll] = cruises_sort_by_date_start(coll.cruises)\n return cc", "def _minimap_to_grid(self, pos_name):\n for k in range(len(self.minimap)):\n for l in range(len(self.minimap[k])):\n if pos_name == self.minimap[k][l]:\n cordx = l*41\n cordy = k*41\n return cordx, cordy", "def generateCities(self):\n citiesDict = {}\n for k in CITIES_TEMPLATE:\n citiesDict[k] = City(k, CITIES_TEMPLATE[k][\"connections\"], CITIES_TEMPLATE[k][\"colour\"])\n return citiesDict", "def _getSquaresDict(self) -> dict:\n squares = {}\n for y in range(0, len(self._map)):\n row = self._map[y]\n for x in range(0, len(row)):\n char = row[x]\n pos = array([x, y])\n if char in squares.keys():\n squares[char].append(pos)\n else:\n squares[char] = [pos]\n \n return squares", "def calculate_positions(self):\n positions = {}\n row_number = 0\n for row in self.board.board:\n cell_number = 0\n number_of_cells = len(row)\n for cell in row:\n if row_number % 2 == 0:\n xCoordinate = (number_of_cells / -2) + cell_number\n yCoordinate = -row_number * math.sqrt(0.75)\n else:\n xCoordinate = (number_of_cells / -2) + cell_number\n yCoordinate = -row_number * math.sqrt(0.75)\n cell_number += 1\n positions[cell] = (xCoordinate, yCoordinate)\n row_number += 1\n return positions", "def world_cups_by_key(world_cups_list):\n world_cups_with_labels = {}\n for w in world_cups_list:\n world_cups_with_labels[w[0]+str(w[1])] = w\n return world_cups_with_labels", "def _get_squares(self, list_of_coords):\n square_values = {x: 
self.get_square(x) for x in list_of_coords}\n square_map = {SQUARE.X: [], SQUARE.O: [], SQUARE.Empty: []}\n for key, value in square_values.items():\n square_map[value].append(key)\n return square_map", "def _init_pieces(self, board):\n color = self.get_color()\n\n if color == 'blue':\n pieces = {'bso1': Soldier('bso1', 'a7', board), 'bso2': Soldier('bso2', 'c7', board),\n 'bso3': Soldier('bso3', 'e7', board), 'bso4': Soldier('bso4', 'g7', board),\n 'bso5': Soldier('bso5', 'i7', board), 'bca1': Cannon('bca1', 'b8', board),\n 'bca2': Cannon('bca2', 'h8', board), 'bge1': General('bge1', 'e9', board),\n 'bch1': Chariot('bch1', 'a10', board), 'bel1': Elephant('bel1', 'b10', board),\n 'bho1': Horse('bho1', 'c10', board), 'bgu1': Guard('bgu1', 'd10', board),\n 'bgu2': Guard('bgu2', 'f10', board), 'bel2': Elephant('bel2', 'g10', board),\n 'bho2': Horse('bho2', 'h10', board), 'bch2': Chariot('bch2', 'i10', board)}\n else:\n pieces = {'rso1': Soldier('rso1', 'a4', board), 'rso2': Soldier('rso2', 'c4', board),\n 'rso3': Soldier('rso3', 'e4', board), 'rso4': Soldier('rso4', 'g4', board),\n 'rso5': Soldier('rso5', 'i4', board), 'rca1': Cannon('rca1', 'b3', board),\n 'rca2': Cannon('rca2', 'h3', board), 'rge1': General('rge1', 'e2', board),\n 'rch1': Chariot('rch1', 'a1', board), 'rel1': Elephant('rel1', 'b1', board),\n 'rho1': Horse('rho1', 'c1', board), 'rgu1': Guard('rgu1', 'd1', board),\n 'rgu2': Guard('rgu2', 'f1', board), 'rel2': Elephant('rel2', 'g1', board),\n 'rho2': Horse('rho2', 'h1', board), 'rch2': Chariot('rch2', 'i1', board)}\n\n self._pieces = pieces\n allowed_destinations = set()\n\n for piece in pieces.values():\n piece.update_hyp_moves()\n piece.update_allowed_moves()\n allowed_destinations |= set(piece.get_allowed_moves())\n\n self.set_allowed_destinations(allowed_destinations)", "def possible_coords(rounding):\n\n # create the dictionary with all possible coordinate combinations\n dicti = {}\n for longi in np.linspace(-180, 180, 360//rounding + 1):\n for lati in np.linspace(-90, 90, 180//rounding + 1):\n calc = (lati, longi)\n dicti[calc] = 0\n\n return dicti", "def create_and_initialize_board():\n board = create_board()\n center_index_1 = BOARD_SIZE // 2\n center_index_2 = center_index_1 - 1\n board[center_index_2][center_index_2] = PLAYER_CHIPS[0]\n board[center_index_2][center_index_1] = PLAYER_CHIPS[1]\n board[center_index_1][center_index_2] = PLAYER_CHIPS[1]\n board[center_index_1][center_index_1] = PLAYER_CHIPS[0]\n return board", "def get_channel_clrs():\n return dict(b='blue', r='red', z='purple')", "def getCourseEntries():\n\trows = getCourseRows()\n\t\n\tprint \"Extracting individual courses...\"\n\t\n\tcourses = []\n\t\n\t#uniqueCourses = {}\n\t\n\tfor row in rows:\n\t\tcourse = getCourseEntryFromCourseRow(row)\n\t\t\n\t\t#if not uniqueCourses.has_key(course.subject + course.code):\n\t\t#\tuniqueCourses[course.subject + course.code] = course\n\t\t\n\t\tcourses.append(course)\n\n\n\tprint \"done!\"\n\tprint \"\"\n\tprint \"Total course rows: \", len(courses)\n\t#print \"Total unique courses: \", len(uniqueCourses.keys())\n\t\n\treturn courses", "def _get_eligible_chs_bitmap(self, grid, cell):\n r, c = cell\n neighs = self.neighbors(2, r, c, separate=True, include_self=True)\n alloc_map = np.bitwise_or.reduce(grid[neighs])\n return alloc_map", "def to_co_ords(moves):\n co_ords = [(0, 0)]\n for move in moves:\n co_ords.extend(calc_co_ord(co_ords[-1], move))\n return co_ords[1:]", "def crime_category_breakdown():\n db_request = main_db_call()\n all_crimes = [item[0] for item 
in db_request]\n sub_offense = offense_counter(all_crimes)\n sub_pie = color_applicator(sub_offense)\n sub_dict = {}\n for i, thing in enumerate(sub_pie):\n for key, category in UPPER_DICT.items():\n if sub_pie[i][0] in category:\n sub_dict.setdefault(key, [])\n sub_dict[key].append(sub_pie[i])\n return sub_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a random datetime between `start` and `end`
def random_date(start, end):
    random_time = start + timedelta(
        seconds=randint(0, int((end - start).total_seconds())),
    )
    hour = numpy.random.choice(hours, p=probabilities)
    return random_time.replace(hour=hour)
[ "def random_datetime(start=START_DATE, end=END_DATE):\n delta = end - start\n int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n random_second = random.randrange(int_delta)\n return start + timedelta(seconds=random_second)", "def random_datetimes(start, end, ntimes):\n delta = end - start\n int_delta = int(delta.total_seconds())\n return [start + timedelta(seconds=randrange(int_delta))\n for _ in range(ntimes)]", "def uniform_random_timestamp(start, end, timescale='minutes'):\n\n if timescale == 'minutes':\n return start + dt.timedelta(\n minutes = np.random.uniform(0, (end-start).seconds/60)\n )\n\n if timescale == 'days':\n return start + dt.timedelta(\n days = np.random.uniform(0, (end-start).days)\n )", "def random_date_generator(start_date):\n\n\t\trange_in_days = current_date + np.timedelta64(-T, \"D\") - np.datetime64(start_date)\n\t\tdays_to_add = np.arange(1, range_in_days-1)\n\t\trandom_date = np.datetime64(start_date) + np.random.choice(days_to_add, n, replace=False)\n\t\treturn random_date", "def date_time_between(cls, start_date='-30y', end_date='now'):\r\n start_date = cls._parse_date_time(start_date)\r\n end_date = cls._parse_date_time(end_date)\r\n timestamp = random.randint(start_date, end_date)\r\n return datetime.fromtimestamp(timestamp)", "def gen_date():\r\n return random.randint(DAY1, TODAY)", "def gen_time(lower, upper):\n gen_time_ord = random.randrange(lower.ord(), upper.ord())\n return ClockTime.from_ordinal(gen_time_ord)", "def random_datetime():\n era = random.choice(range(len(telisaran.Era.years)))\n max_year = 20000 if era == 2 else telisaran.Era.years[era]\n return telisaran.datetime(\n era=era + 1,\n year=random.choice(range(1, max_year + 1)),\n season=random.choice(range(1, telisaran.Year.length_in_seasons + 1)),\n day=random.choice(range(1, telisaran.Season.length_in_days + 1)),\n hour=random.choice(range(24)),\n minute=random.choice(range(60)),\n second=random.choice(range(60))\n )", "def gen_Date_Time():\n month = randint(1, 12)\n day = randint(1, 30)\n year = randint(2009, 2019)\n return (str(month) + '-' + str(day) + '-' + str(year))", "def randInt(start, stop):\n \n return random.randint(start, stop)", "def generate_random_number(start=0, end=100):\n return randint(start, end)", "def get_random_time_segment(segment_ms):\n segment_start=np.random.randint(low=0,high=10000-segment_ms)\n segment_end=segment_start+segment_ms-1\n return (segment_start,segment_end)", "def date_of_order(year=2020, month=1, day=1):\n start_date = datetime(year=year, month=month, day=day, hour=00, minute=00)\n end_date = datetime.now()\n time_between_dates = end_date - start_date\n int_delta = (time_between_dates.days * 24 * 60 * 60) + time_between_dates.seconds\n random_seconds = randrange(int_delta)\n random_date = start_date + timedelta(seconds=random_seconds)\n return random_date.strftime(\"%Y-%m-%d %H:%M:%S\")", "def randint(self, start, stop):\n return self.randrange(start, stop + 1)", "def random_start(self):\n a, b = self.interval\n return a + (b - a) * rn.random_sample()", "def randrange(start, stop, step=1):\n if start == stop:\n return start\n else:\n return randrange(start, stop, step)", "def random(start: t.Union[float, int] = 0, stop: t.Union[float, int] = 1, floating: bool = False):\n floating = isinstance(start, float) or isinstance(stop, float) or floating is True\n\n if stop < start:\n stop, start = start, stop\n\n if floating:\n rnd = uniform(start, stop)\n else:\n rnd = randint(start, stop) # type: ignore\n\n return rnd", "def random_date(self):\n 
stime = time.mktime(time.strptime('01/01/1981', '%m/%d/%Y'))\n etime = time.mktime(time.strptime('01/01/2100', '%m/%d/%Y'))\n\n ptime = stime + random.random() * (etime - stime)\n\n return time.strftime('%m/%d/%Y', time.localtime(ptime))", "def get_time(lower, upper):\n\n hour = random.randint(lower, upper)\n minute = random.randint(0, 60)\n go_time = str(hour) + ':' + str(minute)\n\n return go_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an image of the frequency spectrum of the system as a function of ng.
def frequency_spectrum(self, ngs, freqs, initial_states=[0], kappa=0.1, branch='none'):
    spectrum = np.zeros((len(ngs), len(freqs)))
    for (n, ng) in enumerate(ngs):
        self.ng = ng
        evals, evecs = self.energies(branch, return_evecs=True)
        g = self.dipole_matrix_elements(evecs)
        for i in initial_states:
            omegas = evals[i+1:] - evals[i]
            for (j, omega) in enumerate(omegas):
                spectrum[n] += (0.25 * np.abs(g[i,i+j+1])**2 * kappa**2 / ((freqs-omega)**2 + 0.25 * kappa**2))
    return spectrum
[ "def spectrum(self):\n c_ratio = self._count_rates['flux_ratio']\n return c_ratio + np.random.randn(len(c_ratio)) * self.sigma", "def get_freqs(Fs, n):\n\n return np.linspace(0, Fs / 2, int(n / 2 + 1))", "def output_frequency(y):\n global _prev_frequency\n y = np.copy(interpolate(y, settings.No_Of_Leds // 2))\n common_mode.update(y)\n diff = y - _prev_frequency\n _prev_frequency = np.copy(y)\n # Color channel mappings\n r = r_filt.update(y - common_mode.value)\n g = np.abs(diff)\n b = b_filt.update(np.copy(y))\n # Mirror the color channels for symmetric output\n r = np.concatenate((r[::-1], r))\n g = np.concatenate((g[::-1], g))\n b = np.concatenate((b[::-1], b))\n output = np.array([r, g,b]) * 255\n return output", "def gen_frequency(cube):\n if not isinstance(cube, fits.hdu.image.ImageHDU):\n try:\n cube = cube['image']\n except:\n raise ValueError('func : ``gen_frequency`` var : ``cube`` must '\n 'have ImageHDU.')\n\n header = cube.header\n return WCS(header).all_pix2world(0, 0, range(header['NAXIS3']), 0)[-1]", "def frequency2angular(f):\n return 2*np.pi*f", "def IntensityAtFreq(self,freq):\n return 0 # TO REPLACE WITH YOUR CODE", "def direct_vector_strength_spectrum(event_times, frequencies):\n ret = np.asarray([1-var( (event_times % (1./w) )*w*2*np.pi ) for w in frequencies])\n\n return ret", "def spectrum(self):\n return self", "def plot_power_spectrum(self):\n # convert the frame to grayscale if necessary\n if len(self.frame_orig.shape) > 2:\n frame = cv2.cvtColor(self.frame_orig, cv2.COLOR_BGR2GRAY)\n else:\n frame = self.frame_orig\n\n # expand the image to an optimal size for FFT\n rows, cols = self.frame_orig.shape[:2]\n nrows = cv2.getOptimalDFTSize(rows)\n ncols = cv2.getOptimalDFTSize(cols)\n frame = cv2.copyMakeBorder(frame, 0, ncols - cols, 0, nrows - rows,\n cv2.BORDER_CONSTANT, value=0)\n\n # do FFT and get log-spectrum\n if self.use_numpy_fft:\n img_dft = np.fft.fft2(frame)\n spectrum = np.log10(np.real(np.abs(img_dft))**2)\n else:\n img_dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)\n spectrum = np.log10(img_dft[:, :, 0]**2+img_dft[:, :, 1]**2)\n\n # radial average\n L = max(frame.shape)\n freqs = np.fft.fftfreq(L)[:L/2]\n dists = np.sqrt(np.fft.fftfreq(frame.shape[0])[:, np.newaxis]**2 +\n np.fft.fftfreq(frame.shape[1])**2)\n dcount = np.histogram(dists.ravel(), bins=freqs)[0]\n histo, bins = np.histogram(dists.ravel(), bins=freqs,\n weights=spectrum.ravel())\n\n centers = (bins[:-1] + bins[1:]) / 2\n plt.plot(centers, histo/dcount)\n plt.xlabel('frequency')\n plt.ylabel('log-spectrum')\n plt.show()", "def frequencies(self):\n check_is_fitted(self, \"model\")\n dt = self.time[\"dt\"]\n return np.imag(np.log(self.eigenvalues) / dt) / (2 * np.pi)\n # return self.model.steps[-1][1].frequencies_", "def spectrum(self):\n return scipy.fft.fft(self.values)", "def compute_energy_spectrum(wave_spectral_density, gravity, density):\n return wave_spectral_density * gravity * density", "def Intensity(spl):\n return np.power(10.0,(spl-96.0)/10.0) # intensity value from SPL", "def _mag2fluxdensity(mag,band,unit='Jy'):\n from astropy import units\n _mag = -mag/2.5\n f0 = _zeropoint(band)\n _w = wavelength(band,'angstrom')\n f = (f0 * 10**_mag) * (_w/_w.to('Hz',units.spectral()))\n return f.to(unit)", "def Intensity(spl):\n #temp=spl\n #spl=np.array([(lambda x: float(x))(ii) for ii in temp])\n intfin=10**((spl-96)/10)\n return intfin", "def assign_spectral_res(plot=False):\n dist = Distribution(gaussian(0.5, 0.25, np.linspace(-0.2, 1.2, mp.res_elements)), 
interpolation=True)\n dprint(f\"Mean R = {mp.R_mean}\")\n Rs = (dist(mp.array_size[0]*mp.array_size[1])[0]/float(mp.res_elements)-0.5)*mp.R_sig + mp.R_mean#\n if plot:\n plt.xlabel('R')\n plt.ylabel('#')\n plt.hist(Rs)\n plt.show()\n Rs = np.reshape(Rs, mp.array_size)\n # plt.imshow(Rs)\n # plt.show()\n return Rs", "def power_spectrum(self, img: np.ndarray) -> np.ndarray:\n return np.power(np.abs(img), 2)", "def pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, view=False, **kwargs):\n #freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )\n\n azbins,(freq,zz) = azimuthalAverageBins(psd2,azbins=azbins,interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1: zz=zz[0]\n # the \"Frequency\" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention\n # A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n # or\n # F_f = Sum( a_m e^(-2 pi i f x_m) over the range m,m_max where a_m are the values of the pixels, x_m are the\n # indices of the pixels, and f is the spatial frequency\n freq = freq.astype('float') # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been \"accounted for\" anyway\n\n if return_index:\n if wavenumber:\n fftwavenum = (numpy.fft.fftfreq(zz.size*2)[:zz.size])\n return_vals = list((fftwavenum,zz))\n #return_vals = list((len(freq)/freq,zz))\n else:\n return_vals = list((freq,zz))\n # return_vals = list((freq/len(freq),zz))\n else:\n return_vals = list(zz)\n if return_stddev:\n zzstd = azimuthalAverageBins(psd2,azbins=azbins,stddev=True,interpnan=True, binsize=binsize, **kwargs)\n return_vals.append(zzstd)\n\n if view and pyplotOK:\n pyplot.loglog(freq,zz)\n pyplot.xlabel(\"Spatial Frequency\")\n pyplot.ylabel(\"Spectral Power\")\n\n return return_vals", "def power_spectrum(self, dt = 0.025, variance = 0.04, a1 = 0.1, P0 = 0.5, sigma = 0.05, view = False):\n\n f_nyq = 1/(2*dt)\n f = np.linspace(0, f_nyq, 1000)\n\n def model_noise_markow(f, dt, variance, a1):\n Pn = variance*dt/(np.abs(1 - a1*np.exp(-1j*2*np.pi*f*dt))**2)\n\n return Pn\n\n def model_Ps(f, P0, diff_length):\n Ps = P0*np.exp(-(2*np.pi*f*diff_length)**2)\n\n return Ps\n\n\n ps = model_Ps(f, P0, sigma)\n noise = model_noise_markow(f, dt, variance, a1)\n spectrum = ps + noise\n\n k2 = (np.pi*2*f)**2\n\n\n if view == True:\n plt.clf()\n\n plt.figure(432)\n plt.semilogy(f, spectrum, \"b\", linewidth = 4)\n plt.semilogy(f, noise, \"r\", linewidth = 2)\n plt.semilogy(f, ps, \"k\", linewidth = 2)\n plt.ylim(ymin = 1e-7*P0)\n\n\n\n results = {\"f\": f, \"k2\": k2, \"P\": spectrum, \"Ps\": ps, \"Nf\": noise}\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post a comment. HTTP POST is required. If ``POST['submit'] == "preview"`` or if there are errors, a preview template, ``comments/preview.html``, will be rendered.
def post_comment(request, next=None, using=None): # Fill out some initial data fields from an authenticated user, if present data = request.POST.copy() if request.user.is_authenticated: if not data.get('name', ''): data["name"] = request.user.get_full_name() or request.user.get_username() if not data.get('email', ''): data["email"] = request.user.email # Look up the object we're trying to comment about ctype = data.get("content_type") object_pk = data.get("object_pk") if ctype is None or object_pk is None: return CommentPostBadRequest("Missing content_type or object_pk field.") try: model = apps.get_model(*ctype.split(".", 1)) target = model._default_manager.using(using).get(pk=object_pk) except TypeError: return CommentPostBadRequest( "Invalid content_type value: %r" % escape(ctype)) except AttributeError: return CommentPostBadRequest( "The given content-type %r does not resolve to a valid model." % escape(ctype)) except ObjectDoesNotExist: return CommentPostBadRequest( "No object matching content-type %r and object PK %r exists." % ( escape(ctype), escape(object_pk))) except (ValueError, ValidationError) as e: return CommentPostBadRequest( "Attempting go get content-type %r and object PK %r exists raised %s" % ( escape(ctype), escape(object_pk), e.__class__.__name__)) # Do we want to preview the comment? preview = "preview" in data # Construct the comment form form = django_comments.get_form()(target, data=data) # Check security information if form.security_errors(): return CommentPostBadRequest( "The comment form failed security verification: %s" % escape(str(form.security_errors()))) # If there are errors or if we requested a preview show the comment if form.errors or preview: template_list = [ # These first two exist for purely historical reasons. # Django v1.0 and v1.1 allowed the underscore format for # preview templates, so we have to preserve that format. "comments/%s_%s_preview.html" % (model._meta.app_label, model._meta.model_name), "comments/%s_preview.html" % model._meta.app_label, # Now the usual directory based template hierarchy. 
"comments/%s/%s/preview.html" % (model._meta.app_label, model._meta.model_name), "comments/%s/preview.html" % model._meta.app_label, "comments/preview.html", ] return render(request, template_list, { "comment": form.data.get("comment", ""), "form": form, "next": data.get("next", next), }, ) # Otherwise create the comment comment = form.get_comment_object(site_id=get_current_site(request).id) comment.ip_address = request.META.get("REMOTE_ADDR", None) or None if request.user.is_authenticated: comment.user = request.user # if data.get("comment_perent", None) is not None: # comm_1 = django_comments.get_model().get(id=data.get("comment_perent")) # comment.comment = comm_1.user.username + ', ' + comment.comment # form.move_to(comm_1) # Signal that the comment is about to be saved responses = signals.comment_will_be_posted.send( sender=comment.__class__, comment=comment, request=request ) for (receiver, response) in responses: if response is False: return CommentPostBadRequest( "comment_will_be_posted receiver %r killed the comment" % receiver.__name__) # Save the comment and signal that it was saved comment.save() if data.get("comment_parent", None) is not None and data.get("comment_parent") != '': comm_1 = django_comments.get_model().objects.get(id=data.get("comment_parent")) # comment.comment = comm_1.user.username + ', ' + comment.comment comment.move_to(comm_1) signals.comment_was_posted.send( sender=comment.__class__, comment=comment, request=request ) return next_redirect(request, fallback=next or 'comments-comment-done', c=comment._get_pk_val())
[ "def post_comment():\n\n path = request.args.get('path', '')\n comment_id = request.args.get('comment_id')\n data = request.get_json()\n\n post = (db_session.query(Post)\n .filter(Post.path == path)\n .first())\n\n if not post:\n raise Exception('Unable to find post')\n\n if comment_id:\n comment = (db_session.query(Comment)\n .filter(Comment.id == comment_id)\n .first())\n else:\n comment = Comment(post_id=post.id)\n comment.text = escape(data['text'])\n comment.user_id = current_user.id\n db_session.add(comment)\n db_session.commit()\n\n send_comment_email(path=path,\n commenter=current_user.format_name,\n comment_text=data['text'])\n return \"OK\"", "def commentPost(self, request, context, params, entity, **kwargs):\n\n post_dict = request.POST\n\n if post_dict.get('subscribe') or post_dict.get('unsubscribe'):\n self._handleSubscribePost(request, entity)\n return http.HttpResponseRedirect('')\n\n form = params['public_review_form'](post_dict)\n\n if not form.is_valid():\n # get some entity specific context\n self.updateCommentContext(context, entity, params)\n\n # return the invalid form response\n return self._constructResponse(request, entity=entity, context=context,\n form=form, params=params, template=params['comment_template'])\n\n # get the commentary\n fields = form.cleaned_data\n comment = fields['comment']\n\n if comment:\n # create the review\n self._createReviewFor(entity, comment, is_public=True)\n\n # redirect to the same page\n return http.HttpResponseRedirect('')", "def show_comment_form(self, req, page):\n page_id = self.env.get_real_filename(page)[:-4]\n ajax_mode = req.args.get('mode') == 'ajax'\n target = req.args.get('target')\n page_comment_mode = not target\n\n form_error = preview = None\n title = req.form.get('title', '').strip()\n if 'author' in req.form:\n author = req.form['author']\n else:\n author = req.session.get('author', '')\n if 'author_mail' in req.form:\n author_mail = req.form['author_mail']\n else:\n author_mail = req.session.get('author_mail', '')\n comment_body = req.form.get('comment_body', '')\n fields = (title, author, author_mail, comment_body)\n\n if req.method == 'POST':\n if req.form.get('preview'):\n preview = Comment(page_id, target, title, author, author_mail,\n comment_body)\n # 'homepage' is a forbidden field to thwart bots\n elif req.form.get('homepage') or self.antispam.is_spam(fields):\n form_error = 'Your text contains blocked URLs or words.'\n else:\n if not all(fields):\n form_error = 'You have to fill out all fields.'\n elif _mail_re.search(author_mail) is None:\n form_error = 'You have to provide a valid e-mail address.'\n elif len(comment_body) < 20:\n form_error = 'You comment is too short ' \\\n '(must have at least 20 characters).'\n else:\n # '|none' can stay since it doesn't include comments\n self.cache.pop(page_id + '|inline', None)\n self.cache.pop(page_id + '|bottom', None)\n comment = Comment(page_id, target,\n title, author, author_mail,\n comment_body)\n comment.save()\n req.session['author'] = author\n req.session['author_mail'] = author_mail\n if ajax_mode:\n return JSONResponse({'posted': True, 'error': False,\n 'commentID': comment.comment_id})\n return RedirectResponse(comment.url)\n\n output = render_template(req, '_commentform.html', {\n 'ajax_mode': ajax_mode,\n 'preview': preview,\n 'suggest_url': '@edit/%s/' % page,\n 'comments_form': {\n 'target': target,\n 'title': title,\n 'author': author,\n 'author_mail': author_mail,\n 'comment_body': comment_body,\n 'error': form_error\n }\n })\n\n if 
ajax_mode:\n return JSONResponse({\n 'body': output,\n 'error': bool(form_error),\n 'posted': False\n })\n return Response(render_template(req, 'commentform.html', {\n 'form': output\n }))", "def submit_comment(self, post_id, content):\n self._reddit.submission(post_id).reply(content)", "def create_or_edit_comment(request, pk=None):\n comment = get_object_or_404(Comment, pk=pk) if pk else None\n if request.method == \"POST\":\n form = CommentPostForm(request.POST, request.FILES, instance=comment)\n if form.is_valid():\n comment = form.save()\n return redirect(comment_detail, comment.pk)\n else:\n form = CommentPostForm(instance=comment)\n return render(request, 'commentpostform.html', {'form': form})", "def post_comment_pl():\r\n\tinsert_comment(request.form['Username'], request.form['Comment'])\r\n\t\r\n\treturn redirect(url_for('Pl'))", "def postComment(request, blog_id):\n\t\n\tblog = get_object_or_404(Blog, pk=blog_id)\n\tif request.POST['content']:\t\n\t\tcomment = blog.comment_set.create(content=request.POST['content'], rating=request.POST['rating'])\n\t\tcomment.save()\n\t\treturn HttpResponseRedirect(reverse('dblog.views.comment', args=(blog.id,)))\n\telse:\n\t\t return HttpResponse('Please Add Some Comment')", "def post_reply(assignment_name, file_name, comment_id):\n\t\t# grab user input from submitted form\n\t\tcomment_data = request.form['comment']\n\t\tcomment = Comment(file_name, comment_id, comment_data)\n\t\t# apply filter to comment\n\t\tcomment.apply_filter()\n\t\t# propogate changes to db\n\t\tdb.session.add(comment)\n\t\tdb.session.commit()\n\t\t# re-display the file page.\n\t\treturn get_file(assignment_name, file_name)", "def comment(thread_uid):\n thread = storage.get_thread(thread_uid)\n if not thread:\n abort(404)\n\n text = request.form.get('text') or ''\n if not text:\n return error('comment:text')\n\n storage.add_comment(thread_uid, g.username, text)\n flash('Your comment successfully added!', 'success')\n\n return redirect(url_for('comments', thread_uid=thread_uid))", "def new_comment(self, comment_content, author, post_url, post_title):\n url = self.__build_blog_post_url(self.request, post_url)\n self.msg_plain =\\\n render_to_string('email/new_comment_plain.txt', {'content': comment_content, 'blog_post_url': url})\n self.msg_html = \\\n render_to_string('email/new_comment_html.html',\n {'content': comment_content, 'blog_post_url': url, 'title': post_title})\n\n self.email_to = 'info@buscandolaidea.com'\n self.subject = 'Comentario de ' + author\n self.__send_email()", "def post_comment(comment, bug_id):\n success = bz.notify_bug(comment, bug_id)\n if success:\n log_msg('Posted comment: \"%s\" to %s' % (comment, bug_id))\n else:\n log_msg('Could not post comment to bug %s. 
Adding to comments table'\n % (bug_id))\n cmnt = Comment(comment=comment, bug=bug_id)\n db.CommentInsert(cmnt)", "def post_comment(assignment_name, file_name):\n\t\t# grab user input from submitted form\n\t\tcomment_data = request.form['comment']\n\t\tcomment = Comment(file_name, None, comment_data)\n\t\t# apply filter to comment\n\t\tcomment.apply_filter()\n\t\t# propogate changes to db\n\t\tdb.session.add(comment)\n\t\tdb.session.commit()\n\t\t# re-display the file page.\n\t\treturn get_file(assignment_name, file_name)", "def post(self):\n\n db = get_db()\n if 'author_id' not in request.form:\n raise RequestError(422, 'author_id required')\n elif 'blog_id' not in request.form:\n raise RequestError(422, 'blog_id required')\n elif 'content' not in request.form:\n raise RequestError(422, 'content required')\n else:\n verify_account_by_id(request.form['author_id'])\n insert = db.insert_comment(request.form['blog_id'],\n request.form['author_id'],\n request.form['content'])\n if insert is None:\n raise RequestError(404, 'blog_id not found')\n else:\n response = jsonify(insert)\n\n return response", "def handle_comment_form(request, user_last_post):\n if request.method == 'POST':\n form = handle_comment(request, user_last_post)\n\n else:\n form = CommentForm()\n\n set_humanity_check(request)\n form.humanity = translate_humanity(request)\n form.js_check = request.session['random_number']\n\n return form", "def comment(request):\n if request.POST:\n recipe_id = request.POST.getone('recipe_id')\n text = request.POST.getone('comment_text')\n try:\n creation_time = request.POST.getone('creation_time')\n except KeyError:\n creation_time = None\n\n #if time is present, update\n if creation_time:\n comment = Comment.fetch((request.user.id, recipe_id,\n creation_time))\n comment.text = text\n comment.save()\n\n #otherwise create\n else:\n comment = Comment.construct_from_dict({'text': text}, request.user)\n try:\n recipe = Recipe.fetch(recipe_id)\n recipe.comments.append(comment)\n recipe.save()\n\n #invalid comment\n except AttributeError:\n raise HTTPError\n\n return {'comment': comment,\n 'can_edit': True}", "def test_post_as_comment_when_post_provided(self):\n with patch.object(requests, 'post') as mocked_post:\n mocked_post.return_value = _build_response_obj('comment_unfetched')\n retval = self.c.post(self.post_node, self.message)\n mocked_post.assert_called_with(\n '{0}/{1}/comments?access_token={2}'.format(BASE_URL,\n self.post_node['id'],\n self.access_token),\n data=self.default_post_params\n )", "def create_comment(self, card_id_or_short_link, comment):\n return self.request(\n method=\"POST\", path=ADD_COMMENT_PATH % card_id_or_short_link, params={\"text\": comment}\n )", "def show_post(request, str):\n\n # It's guaranteed to always have a unique header.\n post = get_object_or_404(Post, header=str)\n\n comments = post.comment_set.order_by(\"-comment_date\")\n # Save a comment to given post.\n if (request.method == \"POST\"):\n # Because we add a few things at the comment creation,\n # we pass it with the data argument.\n comment_form = CommentForm(data={\n \"post\": post,\n \"comment_date\": datetime.now(),\n \"author\": request.user,\n \"content\": request.POST[\"content\"],\n \"reply\": request.POST[\"reply\"],\n })\n\n if (comment_form.is_valid()):\n comment_form.save()\n comment_form = CommentForm()\n else :\n comment_form = CommentForm()\n\n return render(request, \"detail.html\", {\n \"post\": post,\n \"comment_form\": comment_form,\n \"comments\": comments\n })", "def post_submit():\n 
# Forbid submission of post if user is not logged in\n if not check.logged_in():\n error_context = {\n 'error_name': \"403 Forbidden\",\n 'error_info': \"You may not post without an account. Please log in or create an account\"\n }\n return render_template('error.html', **error_context)\n # User is logged in, show text submission form\n else:\n form = TextPostForm()\n\n if form.validate_on_submit():\n post = Post()\n post.user_id = int(session['user_id'])\n post.date = datetime.now()\n post.title = form.title.data\n post.content_type = form.content_type.data\n post.content = form.content.data\n post.content_html = md.render(form.content.data)\n # TODO: Implement external links\n post.is_external = False\n post.current_vote = 0\n post.is_banned = False\n post.comment_count = 0\n # TODO: Implement tag existance check\n # This should be done with custom validator after tags are created\n try:\n tag = Tag(form.tag.data)\n print(form.tag.data)\n post.tag_id = tag.id\n except NotImplementedError as error:\n error_context = {\n 'error_name': \"INVALID TAG\",\n 'error_info': \"the tag you entered is invalid\"\n }\n return render_template('error.html', **error_context)\n\n post.save()\n\n flash('Post created sucessfully')\n return redirect(url_for('post_pages.post_view', post_id=post.id))\n \n else:\n return render_template('post_text_submit.html', form=form)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
data = f_read_raw_mat(filename, col, data_format='float', end='l') Read the binary data from filename. Return data, which is a (N, col) array. input
def f_read_raw_mat(filename, col, data_format='f4', end='l'): f = open(filename,'rb') if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format datatype = np.dtype((data_format,(col,))) data = np.fromfile(f,dtype=datatype) f.close() if data.ndim == 2 and data.shape[1] == 1: return data[:,0] else: return data
[ "def _read_binary_matrix(filename):\n with tf.gfile.GFile(filename, \"rb\") as f:\n s = f.read()\n magic = int(np.frombuffer(s, \"int32\", 1))\n ndim = int(np.frombuffer(s, \"int32\", 1, 4))\n eff_dim = max(3, ndim)\n raw_dims = np.frombuffer(s, \"int32\", eff_dim, 8)\n dims = []\n for i in range(0, ndim):\n dims.append(raw_dims[i])\n\n dtype_map = {507333717: \"int8\",\n 507333716: \"int32\",\n 507333713: \"float\",\n 507333715: \"double\"}\n data = np.frombuffer(s, dtype_map[magic], offset=8 + eff_dim * 4)\n data = data.reshape(tuple(dims))\n return data", "def load_mat_from_bin(filename, dtype, shape):\n\n f = open(filename, 'rb')\n byte_array = f.read()\n f.close()\n np_array = np.frombuffer(byte_array, dtype=dtype)\n np_array = np_array.reshape(shape)\n return np_array", "def fread(f, n, dtype):\n if dtype is np.str:\n dt=np.uint8\n else:\n dt=dtype\n \n data_array=np.fromfile(f, dt, n)\n #data_array.shape=(n,1)\n return data_array", "def get_data(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n\n data=get_str(file,cols,nrows)\n\n if nvar==1: \n return numpy.array(map(float,data))\n else:\n data=list(data)\n for j in range(nvar): \n data[j]=numpy.array(map(float,data[j]))\n return tuple(data)", "def _read_matrix_data(self, header, byte_buffer):\n dt, sign, _ = self.type_dict[header.data_type]\n offset = 4 + 4 * len(header.dimensions)\n matrix = np.frombuffer(byte_buffer, dtype=dt, offset=offset)\n return matrix.reshape(header.dimensions).newbyteorder('>')", "def read_PETSc_mat_dense(file):\n # open file\n # omit header\n # read length\n # read values\n # close file\n if not os.path.exists(file):\n raise IOError(\"%s not found.\" % file)\n f = open(file, \"rb\")\n # omit header\n numpy.fromfile(f, dtype=\">i4\", count=1)\n # read dims\n nx = numpy.fromfile(f, dtype=\">i4\", count=1)\n ny = numpy.fromfile(f, dtype=\">i4\", count=1)\n format = numpy.fromfile(f, dtype=\">i4\", count=1)\n val = numpy.fromfile(f, dtype=\">f8\", count=(ny[0]*nx[0]))\n\n # print(\"dims\")\n # print( nx, ny)\n # print(\"nnz\")\n # print (nnz)\n # print (\"nrow\")\n # print (nrow,nrow.shape)\n # print (\"colidx\")\n # print (colidx,colidx.shape)\n # print (\"val\")\n # print (val)\n\n # close file\n f.close()\n #create full matrix\n mat = numpy.zeros(shape=(nx[0], ny[0]), dtype=numpy.float_)\n offset = 0\n for i in range(nx[0]):\n for j in range(ny[0]):\n mat[i, j] = val[offset]\n offset = offset + 1\n #print (numpy.nonzero(lsmfull),i,j,offset,val[offset] )\n return mat", "def loadraw(fname):\n if op.splitext(fname)[1] == '.npy':\n a = np.load(fname)\n else:\n a = np.loadtxt(fname)\n desc = unpack(DESC, a.byteswap().data[0:346])\n return desc, np.fromstring(a.data[346:], dtype = 'int16')", "def readcol(filename, **kwargs):\n f = np.genfromtxt(filename, dtype=None, **kwargs)\n\n t = type(f[0])\n if t == np.ndarray or t == np.void: # array or structured array\n f = map(np.array, zip(*f))\n\n # In Python 3.x all strings (e.g. 
name='NGC1023') are Unicode strings by defauls.\n # However genfromtxt() returns byte strings b'NGC1023' for non-numeric columns.\n # To have the same behaviour in Python 3 as in Python 2, I convert the Numpy\n # byte string 'S' type into Unicode strings, which behaves like normal strings.\n # With this change I can read the string a='NGC1023' from a text file and the\n # test a == 'NGC1023' will give True as expected.\n\n if sys.version >= '3':\n f = [v.astype(str) if v.dtype.char=='S' else v for v in f]\n\n return f", "def readmask(filename, fieldname=None):\n f = scipy.io.loadmat(filename)\n if fieldname is not None:\n return f[fieldname].astype(np.uint8)\n else:\n validkeys = [\n k for k in list(f.keys()) if not (k.startswith('_') and k.endswith('_'))]\n if len(validkeys) < 1:\n raise ValueError('mask file contains no masks!')\n if len(validkeys) > 1:\n raise ValueError('mask file contains multiple masks!')\n return f[validkeys[0]].astype(np.uint8)", "def _read_matrix(matrix_file):\n matrix = numpy.loadtxt(matrix_file, dtype='float')\n return matrix", "def get_data(fname, cols=0, nrows='all', sep=None):\n if isinstance(cols, int):\n cols = (cols,)\n nvar = 1\n else:\n nvar = len(cols)\n\n data = get_str(fname, cols, nrows, sep=sep)\n\n if nvar == 1:\n return numpy.array(map(float, data))\n\n data = list(data)\n for j in range(nvar):\n data[j] = numpy.array(map(float, data[j]))\n return tuple(data)", "def dataExtract2col(filename, cols=(0, 1)):\n return np.loadtxt(filename, usecols=cols, unpack=True)", "def cam_read(filename): #Adapted from sintel_io.py from http://sintel.is.tue.mpg.de/depth\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n N = np.append(N, [[0,0,0,1]], axis=0)\n\n return M,N", "def GSLIB2ndarray(data_file, kcol, nx, ny):\n if ny > 1:\n array = np.ndarray(shape=(ny, nx), dtype=float, order=\"F\")\n else:\n array = np.zeros(nx)\n\n with open(data_file) as f:\n head = [next(f) for _ in range(2)] # read first two lines\n line2 = head[1].split()\n ncol = int(line2[0]) # get the number of columns\n\n for icol in range(ncol): # read over the column names\n head = next(f)\n if icol == kcol:\n col_name = head.split()[0]\n if ny > 1:\n for iy in range(ny):\n for ix in range(0, nx):\n head = next(f)\n array[ny - 1 - iy][ix] = head.split()[kcol]\n else:\n for ix in range(nx):\n head = next(f)\n array[ix] = head.split()[kcol]\n return array, col_name", "def load(self, filename):\n\n # Separate first line from the rest\n with open(filename) as f:\n dimline, *datalines = f.readlines()\n\n mat = [list(map(int, line.split())) for line in datalines]\n dim = tuple(map(int, dimline.split()))\n\n return mat, dim", "def read_mat_7_3(mat_file):\n import digitStruct #Use sarahrn/Py-Gsvhn-DigiStruct-Reader to decode file\n objectList = []\n x_pix = []\n y_pix = []\n for dsObj in digitStruct.yieldNextDigitStruct(mat_file): #Only call to digiStruct\n label = ''\n bounding = []\n for bbox in dsObj.bboxList:\n label += str(bbox.label)\n boundBox = (bbox.label, bbox.left, bbox.top, bbox.width, bbox.height)\n bounding.append(boundBox)\n try:\n image_name = mat_file.split('\\\\')[0] + '\\\\' + dsObj.name\n image = cv2.imread(image_name, 0)\n if isinstance(image, np.ndarray):\n y = len(image)\n x = len(image[0])\n x_pix.append(x)\n y_pix.append(y)\n data = (image_name, x, y, bounding, label) \n objectList.append(data)\n except IOError as e:\n 
print('Could not read:', image_name, ':', e, '- it\\'s ok, skipping.')\n data_len = len(objectList)\n x = max(x_pix)\n y = max(y_pix)\n print(data_len, x, y)\n dataset = np.ndarray((data_len, 2), dtype='|S16')\n bbox_set = np.ndarray((data_len, 6, 5), dtype=np.int16)\n sizes = np.ndarray((data_len, 2), dtype=np.int16)\n for s, sample in enumerate(objectList):\n dataset[s, 0] = sample[0]\n dataset[s, 1] = sample[4]\n sizes[s, 0] = sample[1]\n sizes[s, 1] = sample[2]\n for b, bbox in enumerate(sample[3]):\n bbox_set[s, b, :] = bbox\n return dataset, bbox_set, sizes", "def LSIReader(filename): \n\n metadata = {}\n \n dict_args_int32 = ['pixels_per_column','pixels_per_row','channels',\n 'numeric_type_indicator','apodization_type','remap_type',\n 'image_plane_indicator']\n \n dict_args_float32 = ['rf_center_frequency','rf_bandwidth','dwell_angle',\n 'cone_angle','graze_angle','twist_angle','column_sample_spacing',\n 'row_sample_spacing','column_oversampling_factor',\n 'row_oversampling_factor','column_resolution','row_resolution']\n\n file = open(filename, \"rb\")\n file.seek(0, 2)\n file_size = file.tell()\n file.seek(0, 0)\n num = file.read(200)\n text = file.read(200)\n data = file.read(file_size - file.tell())\n file.close()\n \n for i, arg in enumerate(dict_args_int32):\n metadata[arg] = np.int32(struct.unpack('<i', num[4*i:4*i+4]))\n\n N = len(dict_args_int32) * 4\n for i, arg in enumerate(dict_args_float32):\n metadata[arg] = np.float32(struct.unpack('<f', num[N+4*i:4*i+4+N]))\n \n metadata['text_header'] = str(text, 'utf-8')\n \n \n if metadata['numeric_type_indicator'][0] == 1:\n data = np.frombuffer(data, np.float32)\n elif metadata['numeric_type_indicator'][0] == 2:\n data = np.frombuffer(data, np.complex64)\n else:\n err = 'Invalid \"numeric_type_indicator\". Valid range is 1 or 2'\n ValueError(err) \n \n data = data.reshape(metadata['pixels_per_row'][0], \n metadata['pixels_per_column'][0],\n metadata['channels'][0])\n \n return data, metadata", "def readFlow(fn):\n # Code adapted from:\n # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy\n\n # WARNING: this will work on little-endian architectures (eg Intel x86) only!\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print(magic)\n\t print('Magic number incorrect. Invalid .flo file')\n\t return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n #print 'Reading %d x %d flo file' % (w, h)\n data = np.fromfile(f, np.float32, count=2*w*h)\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h, w, 2))", "def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data", "def read_digit_mat(path: str, filter_non_numeric: bool = False, nrows: int = None):\n if nrows is None:\n if filter_non_numeric:\n with open(path) as f:\n mat = np.array(\n [\n np.array([int(c) for c in re.sub(\"[^0-9]\", \"\", line.strip())])\n for line in f.readlines()\n ],\n dtype=np.int8,\n )\n else:\n with open(path) as f:\n mat = np.array(\n [np.array([int(c) for c in line.strip()]) for line in f.readlines()],\n dtype=np.int8,\n )\n else:\n assert filter_non_numeric is False\n mat = np.array(\n [\n np.array([int(c) for c in line.item()])\n for line in pd.read_csv(path, nrows=nrows, header=None).values\n ],\n dtype=np.int8,\n )\n return mat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
len = f_read_raw_mat_length(filename, data_format='f4') Read length of data, i.e., the number of elements in the data file. If data is in shape (N, M), then len = N * M. input
def f_read_raw_mat_length(filename, data_format='f4'): f = open(filename,'rb') tmp = f.seek(0, 2) bytes_num = f.tell() f.close() if data_format == 'f4': return int(bytes_num / 4) else: return bytes_num
[ "def f_read_htk_length(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n f.close()\n \n sample_size = int(head_info['SampleSize'][0]/4)\n return sample_size", "def get_record_length(fid):\n # assume initially big endian (all input data 32-bit integers)\n dtype = np.dtype('>i4')\n value, = np.fromfile(fid, dtype=dtype, count=1)\n fid.seek(0)\n # swap to little endian and reread first line\n if (value > 100):\n dtype = np.dtype('<i4')\n value, = np.fromfile(fid, dtype=dtype, count=1)\n fid.seek(0)\n # get the number of variables\n n_blocks = value//dtype.itemsize\n # read past first record\n np.fromfile(fid, dtype=dtype, count=n_blocks)\n # return the number of variables and the endianness\n return (n_blocks, dtype)", "def load(self, filename):\n\n # Separate first line from the rest\n with open(filename) as f:\n dimline, *datalines = f.readlines()\n\n mat = [list(map(int, line.split())) for line in datalines]\n dim = tuple(map(int, dimline.split()))\n\n return mat, dim", "def max_len(filename: str) -> int:\n with open(filename, 'r') as file:\n table = [line.split() for line in file.readlines()] # 2-D array\n length = max([len(x) for row in table for x in row]) # adjust padding\n col = len(table[0])\n\n return length, col", "def f_read_htk(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n \n \"\"\"if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n \"\"\" \n if 'f' in data_format:\n sample_size = int(head_info['SampleSize'][0]/4)\n else:\n print(\"Error in read_htk: input should be float32\")\n return False\n \n datatype = np.dtype((data_format,(sample_size,)))\n data = np.fromfile(f,dtype=datatype)\n f.close()\n return data", "def size(data):\r\n return data[0].get_value(borrow=True).shape[0]", "def cam_read(filename): #Adapted from sintel_io.py from http://sintel.is.tue.mpg.de/depth\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n N = np.append(N, [[0,0,0,1]], axis=0)\n\n return M,N", "def read_ARMS_data(self, filename):\n\t\tif self.debug: print '\\t\\treadARMS.read_arms_data', filename\n\t\twith open(filename, 'r') as self.data_file: # automatically closes safely on error\n\t\t\ts = self.data_file.read()\n\n\t\tendian = '<'\n\t\toffset = [0]\n\t\theader_dtype = np.dtype('u4')\n\t\theader_dtype.newbyteorder(endian)\n\t\tmodel_name_length = 
np.frombuffer(s, dtype=header_dtype, offset = offset[0], count=1)[0]\n\n\t\tif model_name_length > 100: #header length too long, switch endian\n\t\t\tendian = '>'\n\t\t\tif self.debug: print '\\t\\t\\tswitching endian to ', endian\n\t\t\theader_dtype = np.dtype(endian + 'u4')\n\t\t\tmodel_name_length = np.frombuffer(s, dtype=header_dtype, offset = offset[0], count=1)[0]\n\t\t\tif self.debug: print '\\t\\t\\tlength of model_name:', model_name_length\n\n\t\tdef read_record(s, dtype, count=1, offset=None):\n\t\t\t\"\"\"offset is a mutable list\"\"\"\n\t\t\t# print 'input offset', offset[0]\n\t\t\tif offset == None:\n\t\t\t\toffset = [0]\n\t\t\telse:\n\t\t\t\t# print 'rec length:', np.frombuffer(s, dtype = header_dtype, count =1, offset =offset[0])[0], 'bytes'\n\t\t\t\toffset[0] += header_dtype.itemsize #record header\n\t\t\t\tresult = np.frombuffer(s, dtype=dtype, count=count, offset=offset[0])\n\t\t\t\toffset[0] += result.itemsize*count + header_dtype.itemsize #footer\n\t\t\treturn result\n\n\t\t#set model name\n\t\ttry:\n\t\t\tpython_model_name = read_record(s,'S'+str(model_name_length), offset=offset)[0]\n\t\texcept ValueError:\n\t\t\tprint 'S' + str(model_name_length)\n\t\t\traise \n\t\tif self.debug: print '\\t\\tpython_model_name:', python_model_name\n\t\tself.globalAttributes['python_model_name'] = Attribute('python_model_name', str(python_model_name))\n\n\t\tdt = np.dtype(endian+'f')\n\t\t#set model time\n\t\ttry:\n\t\t\tmodel_time = read_record(s, dt, offset = offset)[0]\n\t\texcept ValueError:\n\t\t\tprint dt\n\t\t\traise\n\n\t\tif self.debug: print '\\t\\tsim_time', model_time, 'offset', offset\n\t\tself.globalAttributes['sim_time'] = Attribute('sim_time', float(model_time))\n\n\t\t# get number of total blocks, leaf blocks, and new_grid_flag\n\t\t\n\t\tnum_total_blocks, num_leaf_blocks, new_grid_flag = read_record(s, endian+'i4', 3, offset = offset)\n\t\t\n\t\tif self.debug: print '\\t\\ttotal blocks, leaf blocks, new grid:', num_total_blocks, num_leaf_blocks, new_grid_flag\n\n\t\tself.globalAttributes['num_total_blocks'] = Attribute('num_total_blocks', int(num_total_blocks))\n\t\tself.globalAttributes['num_leaf_blocks'] = Attribute('num_leaf_blocks', int(num_leaf_blocks))\n\t\tself.globalAttributes['new_grid_flag'] = Attribute('new_grid_flag', int(new_grid_flag))\n\n\t\tif self.grid_type == 'Spherical_Exponential':\n\t\t\tni = self.getGlobalAttribute('RBlockSize').getAttributeValue()\n\t\t\tnj = self.getGlobalAttribute('TBlockSize').getAttributeValue()\n\t\t\tnk = self.getGlobalAttribute('PBlockSize').getAttributeValue()\n\t\telif self.grid_type == 'Cartesian':\n\t\t\tni = self.getGlobalAttribute('XBlockSize').getAttributeValue()\n\t\t\tnj = self.getGlobalAttribute('YBlockSize').getAttributeValue()\n\t\t\tnk = self.getGlobalAttribute('ZBlockSize').getAttributeValue()\n\t\telse:\n\t\t\traise ImportError('grid type not supported')\n\n\t\tself.leaf_resolution = (ni,nj,nk)\n\n\n\t\tdef create_block_datatype():\n\t\t\tdtype_list = []\n\t\t\tdtype_list.append(('tree_header', endian+'i4'))\n\t\t\tdtype_list.append(('block_loc', endian +'2i4'))\n\t\t\tdtype_list.append(('block_type', endian +'i4'))\n\t\t\tdtype_list.append(('parent_loc', endian +'2i4'))\n\t\t\tdtype_list.append(('child_loc', endian +'2,8i4')) # 8 (block#, processor#) pairs \n\t\t\tdtype_list.append(('tree_footer', endian+'i4'))\n\t\t\tdtype_list.append(('bndbox_header', endian+'i4'))\n\t\t\tdtype_list.append(('bndbox', endian+'6f4'))\n\t\t\tdtype_list.append(('bndbox_footer', endian+'i4'))\n\t\t\treturn 
np.dtype(dtype_list)\n\n\t\tblock_dtype = create_block_datatype()\n\n\t\t# variable_names = ['Mass_density','Velocity_R','Velocity_T','Velocity_P','Magnetic_Field_R','Magnetic_Field_T','Magnetic_Field_P']\n\n\t\tdef create_variable_datatype(self):\n\t\t\t\"\"\"creates a custom datatype to view variable data\"\"\"\n\t\t\t# this assumes order of variables in header matches data file, s.t. variableNames was initialized in the proper order.\n\t\t\tvariable_names = self.variableNames.values()\n\t\t\tdtype_list = [('header', endian+'i4')]\n\t\t\tfor var_name in variable_names: \n\t\t\t\tdtype_list.append((var_name, endian + 'f4'))\n\t\t\tdtype_list.append(('footer', endian+'i4'))\n\t\t\treturn np.dtype(dtype_list)\n\t\t\n\t\tvariable_datatype = create_variable_datatype(self)\n\n\t\tfor block_number in range(num_total_blocks): #num_total_blocks\n\t\t\tblock_data = np.frombuffer(s, dtype = block_dtype, count=1, offset = offset[0])\n\t\t\toffset[0] += block_dtype.itemsize\n\n\t\t\tblock_key = tuple(block_data['block_loc'].flatten())\n\n\t\t\tblock_type = block_data['block_type']\n\t\t\tparent_key = tuple(block_data['parent_loc'].flatten())\n\t\t\tchild_keys = tuple(block_data['child_loc'].flatten())\n\t\t\tbndbox = list(block_data['bndbox'][0].flatten())\n\n\t\t\tif self.grid_type == 'Spherical_Exponential': #flip theta bounds so 0 is at equator\n\t\t\t\tbndbox[2:4] = bndbox[3],bndbox[2] \n\t\t\t\n\t\t\t\n\t\t\tself.tree_data[block_key] = self._tree_data_tuple(block_type, parent_key, child_keys, self._bbx_tuple(*bndbox))\n\n\t\t\t# 1 = leaf, 2 = parent, 3 = grand-parent, etc\n\t\t\tif block_type == 1: \n\t\t\t\tvariables = np.frombuffer(buffer(s), dtype=variable_datatype, count = ni*nj*nk, offset = offset[0])\n\t\t\t\tvariables_reshaped = variables.reshape(ni,nj,nk, order = 'F') # 'F' avoids transpose later\n\t\t\t\tif self.grid_type == 'Spherical_Exponential': #flip theta\n\t\t\t\t\tself.leaf_data[block_key] = variables_reshaped[:,::-1,:]\n\t\t\t\telse:\n\t\t\t\t\tself.leaf_data[block_key] = variables_reshaped\n\t\t\t\toffset[0] += variable_datatype.itemsize*ni*nj*nk\n\t\t\t\n\t\t\tif parent_key == (-1, -1):\n\t\t\t\tself.roots.append( (block_key,bndbox) )\n\n\t\tself.sort_roots()\n\t\tself.set_root_ranges()\n\t\tself._data_loaded = True", "def fread(f, n, dtype):\n if dtype is np.str:\n dt=np.uint8\n else:\n dt=dtype\n \n data_array=np.fromfile(f, dt, n)\n #data_array.shape=(n,1)\n return data_array", "def read_size(file: typing.IO[bytes]) -> int:\n return struct.unpack_from(\"<L\", file.read(4))[0]", "def get_length_and_feature_number(file_name):\n max_length = 0\n min_length = 99999\n max_feature = 0\n line_no = 0\n with open(file_name) as fin:\n for line in fin:\n line_no += 1\n if line_no % 100000 == 0:\n print('%d lines finished.' 
% (line_no))\n fields = line.strip().split()\n X_i = map(lambda x: int(x.split(':')[0]), fields[1:])\n max_feature = max(max_feature, max(X_i))\n max_length = max(max_length, len(X_i))\n min_length = min(min_length, len(X_i))\n return min_length, max_length, max_feature", "def get_npy_lines(filename):\n\n with open(filename, 'rb') as handle:\n handle.read(10) # Skip the binary part in header\n try:\n header = handle.readline().decode()\n n_lines = int(re.findall(r'\\((\\d+), \\d+\\)', header)[0])\n except IndexError:\n print(\"Failed to parse npy header\")\n n_lines = np.load(filename).shape[0]\n\n return n_lines", "def _read_binary_matrix(filename):\n with tf.gfile.GFile(filename, \"rb\") as f:\n s = f.read()\n magic = int(np.frombuffer(s, \"int32\", 1))\n ndim = int(np.frombuffer(s, \"int32\", 1, 4))\n eff_dim = max(3, ndim)\n raw_dims = np.frombuffer(s, \"int32\", eff_dim, 8)\n dims = []\n for i in range(0, ndim):\n dims.append(raw_dims[i])\n\n dtype_map = {507333717: \"int8\",\n 507333716: \"int32\",\n 507333713: \"float\",\n 507333715: \"double\"}\n data = np.frombuffer(s, dtype_map[magic], offset=8 + eff_dim * 4)\n data = data.reshape(tuple(dims))\n return data", "def _read_molly_data(mf, fcode, npix, border):\n # skip 4 bytes at start\n mf.seek(4,1)\n\n cfrat = None\n\n if fcode == 1:\n y = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n e = None\n ylabel = 'Counts'\n yunits = ''\n\n elif fcode == 2:\n y = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n e = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n ylabel = 'Counts'\n yunits = ''\n\n elif fcode == 3:\n counts = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n errors = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n flux = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n\n cfrat = np.empty(npix, dtype=border + 'f4')\n mod = counts == 0.\n cfrat[mod] = flux[mod]\n mod = counts != 0.\n cfrat[mod] = counts[mod] / flux[mod]\n\n e = np.empty_like(errors)\n ok = cfrat > 0.\n e[ok] = errors[ok] / cfrat[ok]\n e[~ok] = -1.\n y = flux\n y[counts == 0.] 
= 0.\n\n ylabel = 'f\\d\\gn\\u'\n yunits = 'mJy'\n\n elif fcode == 4:\n y = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n e = None\n ylabel = 'f\\d\\gn\\u'\n yunits = 'mJy'\n\n elif fcode == 5:\n y = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n e = np.fromfile(file=mf, dtype=border + 'f4', count=npix)\n ylabel = 'f\\d\\gn\\u'\n yunits = 'mJy'\n\n else:\n raise MollyError('_read_molly_data: invalid FCODE in molly spectrum = ' + str(fcode))\n \n # skip 4 bytes at end\n mf.seek(4,1)\n \n return (dnl.Axis(ylabel, yunits, y, e), cfrat)", "def f_append_raw_mat(data, filename, data_format='f4', end='l'):\n if not isinstance(data, np.ndarray):\n print(\"Error write_raw_mat: input shoul be np.array\")\n return False\n f = open(filename,'ab')\n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f,'')\n f.close()\n return True", "def LSIReader(filename): \n\n metadata = {}\n \n dict_args_int32 = ['pixels_per_column','pixels_per_row','channels',\n 'numeric_type_indicator','apodization_type','remap_type',\n 'image_plane_indicator']\n \n dict_args_float32 = ['rf_center_frequency','rf_bandwidth','dwell_angle',\n 'cone_angle','graze_angle','twist_angle','column_sample_spacing',\n 'row_sample_spacing','column_oversampling_factor',\n 'row_oversampling_factor','column_resolution','row_resolution']\n\n file = open(filename, \"rb\")\n file.seek(0, 2)\n file_size = file.tell()\n file.seek(0, 0)\n num = file.read(200)\n text = file.read(200)\n data = file.read(file_size - file.tell())\n file.close()\n \n for i, arg in enumerate(dict_args_int32):\n metadata[arg] = np.int32(struct.unpack('<i', num[4*i:4*i+4]))\n\n N = len(dict_args_int32) * 4\n for i, arg in enumerate(dict_args_float32):\n metadata[arg] = np.float32(struct.unpack('<f', num[N+4*i:4*i+4+N]))\n \n metadata['text_header'] = str(text, 'utf-8')\n \n \n if metadata['numeric_type_indicator'][0] == 1:\n data = np.frombuffer(data, np.float32)\n elif metadata['numeric_type_indicator'][0] == 2:\n data = np.frombuffer(data, np.complex64)\n else:\n err = 'Invalid \"numeric_type_indicator\". Valid range is 1 or 2'\n ValueError(err) \n \n data = data.reshape(metadata['pixels_per_row'][0], \n metadata['pixels_per_column'][0],\n metadata['channels'][0])\n \n return data, metadata", "def num_im_data(self):\n return len(self.rec_im_data)", "def read_multi_dim_data(filename):\n dataset =[]\n\n ##from tutorial\n\n return dataset", "def f_write_raw_mat(data, filename, data_format='f4', end='l'):\n if not isinstance(data, np.ndarray):\n print(\"Error write_raw_mat: input should be np.array\")\n return False\n f = open(filename,'wb')\n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f,'')\n f.close()\n return True", "def count_data(self):\n num_data = 0\n for in_file_name in self.file_names:\n h5_file = h5py.File( in_file_name, 'r' )\n X = h5_file[self.features_name]\n if hasattr(X, 'keys'):\n num_data += len(X[ list(X.keys())[0] ])\n else:\n num_data += len(X)\n h5_file.close()\n return num_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
data = read_htk(filename, data_format='f4', end='l') Read an HTK file and return the data as a numpy.array. input
def f_read_htk(filename, data_format='f4', end='l'): if end=='l': data_format = '<'+data_format data_formatInt4 = '<i4' data_formatInt2 = '<i2' elif end=='b': data_format = '>'+data_format data_formatInt4 = '>i4' data_formatInt2 = '>i2' else: data_format = '='+data_format data_formatInt4 = '=i4' data_formatInt2 = '=i2' head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4), ('SampleSize',data_formatInt2), ('kind',data_formatInt2)]) f = open(filename,'rb') head_info = np.fromfile(f,dtype=head_type,count=1) """if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format """ if 'f' in data_format: sample_size = int(head_info['SampleSize'][0]/4) else: print("Error in read_htk: input should be float32") return False datatype = np.dtype((data_format,(sample_size,))) data = np.fromfile(f,dtype=datatype) f.close() return data
[ "def f_read_htk_length(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n f.close()\n \n sample_size = int(head_info['SampleSize'][0]/4)\n return sample_size", "def read_hcore(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 1:\n size = int(line[0])\n hcore = np.zeros((size, size), dtype=np.float64)\n elif len(line) == 3:\n i, j, val = int(line[0])-1, int(line[1])-1, np.float64(line[2])\n hcore[i,j] = hcore[j,i] = val\n return hcore", "def read_hcore_pack(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 1:\n size = int(line[0])\n hcore = np.zeros((size, size), dtype=np.float64)\n elif len(line) == 3:\n i, j, val = int(line[0])-1, int(line[1])-1, np.float64(line[2])\n hcore[i,j] = hcore[j,i] = val\n return hcore", "def readHlist(filepath):\n\n #Check to see how many fields in hlist\n with open(filepath, 'r') as fp:\n\n l = fp.readline()\n ls = l.split(' ')\n nfields = len(ls)\n print('Number of fields in hlist {0}: {1}'.format(filepath, nfields))\n\n if nfields == 66:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float), ('Mvir_all',float), ('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,34,17,18,19,35,36,37,38,39,56,57,58,59]\n\n elif nfields == 67:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float),('Mvir_all',float),('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,18,19,20,34,35,36,37,38,39,54,55,56,57]\n\n else:\n print('Unrecognized Hlist format, check file or update readHlist with new format')\n raise Exception\n\n\n halos = np.genfromtxt(filepath,dtype=dtype,usecols=usecols)\n halos = halos[halos['id']!=0]\n\n return halos", "def test_read_data():\n data = glymur.Jp2k(AIA_193_JP2).read()\n assert isinstance(data, np.ndarray)", "def read_wtk():\n import numpy as np;\n if isfile(\"wtk.dat\"):\n wtkfile = open(\"wtk.dat\");\n else : \n print(\"Auxiliary file not found (wtk.dat). 
Impossible to continue.\")\n sys.exit(1)\n wtk = [];\n for line in wtkfile.readlines():\n wtk.append((float(line)));\n wtkfile.close()\n wtk = np.array(wtk);\n return wtk", "def txt_to_array(pathname, shape):\n import numpy as np\n f = open(pathname, 'r')\n data = np.array(\n [float(i) for i in f.read().split()]).reshape(shape)\n f.close()\n return data", "def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data", "def FileOpen(filename):\n if filename[-4:] != \".txt\":\n filename = filename + \".txt\"\n\n data = np.array([])\n\n nlines = 0\n\n file = open(filename, \"r\") # opens on 'read' mode\n\n for line in file:\n nlines += 1\n data = np.append(data, np.fromstring(line, dtype=np.float, sep=','))\n\n file.close\n\n data = np.reshape(data, (nlines, int(data.size / nlines)))\n\n return data", "def open_granule_1km(filename):\n\n try:\n data = np.fromfile(filename, dtype=np.int16)\n data = data.reshape(18000, 43200).T\n except IOError:\n print filename + ' was not accessed.'\n data = None\n return data", "def GSLIB2ndarray(data_file, kcol, nx, ny):\n if ny > 1:\n array = np.ndarray(shape=(ny, nx), dtype=float, order=\"F\")\n else:\n array = np.zeros(nx)\n\n with open(data_file) as f:\n head = [next(f) for _ in range(2)] # read first two lines\n line2 = head[1].split()\n ncol = int(line2[0]) # get the number of columns\n\n for icol in range(ncol): # read over the column names\n head = next(f)\n if icol == kcol:\n col_name = head.split()[0]\n if ny > 1:\n for iy in range(ny):\n for ix in range(0, nx):\n head = next(f)\n array[ny - 1 - iy][ix] = head.split()[kcol]\n else:\n for ix in range(nx):\n head = next(f)\n array[ix] = head.split()[kcol]\n return array, col_name", "def twophotonHRead():\n xuvtop = os.environ['XUVTOP']\n fName = os.path.join(xuvtop, 'continuum', 'hseq_2photon.dat')\n dFile = open(fName, 'r')\n a = dFile.readline()\n y0 = np.asarray(a.split())\n a = dFile.readline()\n z0 = np.asarray(a.split())\n nz = 30\n avalue = np.zeros(nz, 'float64')\n asum = np.zeros(nz, 'float64')\n psi0 = np.zeros((nz, 17), 'float64')\n for iz in range(nz):\n a = dFile.readline().split()\n avalue[iz] = float(a[1])\n asum[iz] = float(a[2])\n psi = np.asarray(a[3:])\n psi0[iz] = psi\n dFile.close()\n return {'y0':y0, 'z0':z0, 'avalue':avalue, 'asum':asum, 'psi0':psi0.reshape(30, 17)}", "def _parse_file(self, source: str) -> np.array:\n with open(source, 'r') as f:\n data = np.loadtxt(f)\n if data.size not in (365, 366):\n raise ValueError(\n 'The file contains less than 365 or more than 366 entries.')\n\n return data", "def readFlow(fn):\n # Code adapted from:\n # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy\n\n # WARNING: this will work on little-endian architectures (eg Intel x86) only!\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print(magic)\n\t print('Magic number incorrect. 
Invalid .flo file')\n\t return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n #print 'Reading %d x %d flo file' % (w, h)\n data = np.fromfile(f, np.float32, count=2*w*h)\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h, w, 2))", "def READINFILE_AUTO(FileName):\n #---\n # Read the header\n #---\n FF = open(FileName,\"r\")\n HEADER = FF.readline().strip(\"#\").strip(\"\").split()\n FF.close()\n\n #---\n # Set the formats and dtype\n #---\n FORMATS = [\"S500\"] * len(HEADER)\n DTYPE = np.dtype([(HEADER[i], FORMATS[i]) for i in range(0,len(HEADER))])\n\n #---\n # Read the content\n #---\n CONTENTS = np.loadtxt(FileName, dtype=DTYPE, ndmin=1) # make sure it returns array.\n\n return HEADER, CONTENTS", "def getdata(filename, *ext, **extkeys):\n\n if 'header' in extkeys:\n _gethdr = extkeys['header']\n del extkeys['header']\n else:\n _gethdr = False\n\n hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)\n hdu = hdulist[_ext]\n _data = hdu.data\n if _data is None and isinstance(_ext, _Zero):\n try:\n hdu = hdulist[1]\n _data = hdu.data\n except IndexError:\n raise IndexError, 'No data in this HDU.'\n if _data is None:\n raise IndexError, 'No data in this HDU.'\n if _gethdr:\n _hdr = hdu.header\n hdulist.close()\n if _gethdr:\n return _data, _hdr\n else:\n return _data", "def twophotonHeRead():\n xuvtop = os.environ['XUVTOP']\n fName = os.path.join(xuvtop, 'continuum', 'heseq_2photon.dat')\n dFile = open(fName, 'r')\n a = dFile.readline()\n y0 = np.asarray(a.split())\n nz = 30\n avalue = np.zeros(nz, 'float64')\n psi0 = np.zeros((nz, 41), 'float64')\n for iz in range(1, nz):\n a = dFile.readline().split()\n avalue[iz] = float(a[1])\n psi = np.asarray(a[2:])\n psi0[iz] = psi\n dFile.close()\n return {'y0':y0, 'avalue':avalue, 'psi0':psi0.reshape(30, 41)}", "def read_h(h_file):\n h_matrix = np.zeros((3, 3), dtype=\"float\")\n for i, line in enumerate(open(h_file, \"r\").readlines()):\n h_matrix[i, :] = [float(x) for x in line.strip().split(\",\")]\n\n return h_matrix", "def data2array(filepath):\n file = open(filepath, 'r')\n skip_bill = file.readline() #skip over column name\n lines = file.readlines()\n\n lst = []\n #iterate through the lines and append to list\n for line in lines:\n line = line.strip() #get rid of the \\n\n value = float(line) #get the float value\n lst.append(value)\n\n arr = np.asarray(lst)\n return arr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
length = f_read_htk_length(filename, data_format='f4', end='l') Read the HTK file header and return the sample size, i.e., the number of elements in each data vector. input
def f_read_htk_length(filename, data_format='f4', end='l'): if end=='l': data_format = '<'+data_format data_formatInt4 = '<i4' data_formatInt2 = '<i2' elif end=='b': data_format = '>'+data_format data_formatInt4 = '>i4' data_formatInt2 = '>i2' else: data_format = '='+data_format data_formatInt4 = '=i4' data_formatInt2 = '=i2' head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4), ('SampleSize',data_formatInt2), ('kind',data_formatInt2)]) f = open(filename,'rb') head_info = np.fromfile(f,dtype=head_type,count=1) f.close() sample_size = int(head_info['SampleSize'][0]/4) return sample_size
[ "def f_read_htk(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n \n \"\"\"if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n \"\"\" \n if 'f' in data_format:\n sample_size = int(head_info['SampleSize'][0]/4)\n else:\n print(\"Error in read_htk: input should be float32\")\n return False\n \n datatype = np.dtype((data_format,(sample_size,)))\n data = np.fromfile(f,dtype=datatype)\n f.close()\n return data", "def read_hcore_pack(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 1:\n size = int(line[0])\n hcore = np.zeros((size, size), dtype=np.float64)\n elif len(line) == 3:\n i, j, val = int(line[0])-1, int(line[1])-1, np.float64(line[2])\n hcore[i,j] = hcore[j,i] = val\n return hcore", "def read_hcore(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 1:\n size = int(line[0])\n hcore = np.zeros((size, size), dtype=np.float64)\n elif len(line) == 3:\n i, j, val = int(line[0])-1, int(line[1])-1, np.float64(line[2])\n hcore[i,j] = hcore[j,i] = val\n return hcore", "def readHlist(filepath):\n\n #Check to see how many fields in hlist\n with open(filepath, 'r') as fp:\n\n l = fp.readline()\n ls = l.split(' ')\n nfields = len(ls)\n print('Number of fields in hlist {0}: {1}'.format(filepath, nfields))\n\n if nfields == 66:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float), ('Mvir_all',float), ('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,34,17,18,19,35,36,37,38,39,56,57,58,59]\n\n elif nfields == 67:\n dtype = np.dtype([('scale',float),('id',int),('mvir',float),('rvir',float),('rs',float),\\\n ('vrms',float),('vmax',float), ('Rs_Klypin',float),('PX', float),\\\n ('PY', float), ('PZ', float),('Mvir_all',float),('M200b',float),\\\n ('M200c',float),('M500c',float),('M2500c',float),('Macc',float),\\\n ('Mpeak',float),('Vacc',float),('Vpeak',float)])\n usecols = [0,1,10,11,12,13,16,18,19,20,34,35,36,37,38,39,54,55,56,57]\n\n else:\n print('Unrecognized Hlist format, check file or update readHlist with new format')\n raise Exception\n\n\n halos = np.genfromtxt(filepath,dtype=dtype,usecols=usecols)\n halos = halos[halos['id']!=0]\n\n return halos", "def get_record_length(fid):\n # assume initially big endian (all input data 32-bit integers)\n dtype = np.dtype('>i4')\n value, = np.fromfile(fid, dtype=dtype, count=1)\n fid.seek(0)\n # swap to little endian and reread first line\n if (value > 100):\n dtype = np.dtype('<i4')\n value, = np.fromfile(fid, dtype=dtype, count=1)\n fid.seek(0)\n # get the number of variables\n n_blocks = value//dtype.itemsize\n # read past first record\n np.fromfile(fid, 
dtype=dtype, count=n_blocks)\n # return the number of variables and the endianness\n return (n_blocks, dtype)", "def open_granule_1km(filename):\n\n try:\n data = np.fromfile(filename, dtype=np.int16)\n data = data.reshape(18000, 43200).T\n except IOError:\n print filename + ' was not accessed.'\n data = None\n return data", "def test_read_data():\n data = glymur.Jp2k(AIA_193_JP2).read()\n assert isinstance(data, np.ndarray)", "def read_wtk():\n import numpy as np;\n if isfile(\"wtk.dat\"):\n wtkfile = open(\"wtk.dat\");\n else : \n print(\"Auxiliary file not found (wtk.dat). Impossible to continue.\")\n sys.exit(1)\n wtk = [];\n for line in wtkfile.readlines():\n wtk.append((float(line)));\n wtkfile.close()\n wtk = np.array(wtk);\n return wtk", "def count_observation(data_name):\n #filename = str(data_name)\n with open(data_name) as file: \n num_lines = 0\n for line in file: \n num_lines = num_lines + 1\n num_obs = num_lines/3\n return(int(num_obs))", "def get_npy_lines(filename):\n\n with open(filename, 'rb') as handle:\n handle.read(10) # Skip the binary part in header\n try:\n header = handle.readline().decode()\n n_lines = int(re.findall(r'\\((\\d+), \\d+\\)', header)[0])\n except IndexError:\n print(\"Failed to parse npy header\")\n n_lines = np.load(filename).shape[0]\n\n return n_lines", "def thd_reader(filename):\n with open(filename, 'rb') as f:\n\n # Read the header common to all file types\n metadata = read_header(f)\n \n # Interactive mode specific header\n intmode_dtype = np.dtype([\n ('CurveIndex', 'int32' ),\n ('TimeOfRecording', 'int32' ),\n ('BoardSerial', 'int32' ),\n ('CFDZeroCross', 'int32' ),\n ('CFDDiscrMin', 'int32' ),\n ('SyncLevel', 'int32' ),\n ('CurveOffset', 'int32' ),\n ('RoutingChannel', 'int32' ),\n ('SubMode', 'int32' ),\n ('MeasMode', 'int32' ),\n ('P1', 'f4' ),\n ('P2', 'f4' ),\n ('P3', 'f4' ),\n ('RangeNo', 'int32' ),\n ('Offset', 'int32' ),\n ('AcquisitionTime', 'int32' ),\n ('StopAfter', 'int32' ),\n ('StopReason', 'int32' ),\n ('SyncRate', 'int32' ),\n ('CFDCountRate', 'int32' ),\n ('TDCCountRate', 'int32' ),\n ('IntegralCount', 'int32' ),\n ('Resolution', 'f4' ),\n ('ExtDevices', 'int32' ),\n ('reserved', 'int32' )])\n intmode = np.fromfile(f, intmode_dtype, count=1)\n\n metadata.update(dict(intmode=intmode))\n \n # ...\n hist = np.fromfile(f, dtype='uint32', count=4096)\n bins = 1e-9*intmode['Resolution']*np.arange(0, 4096)\n \n return hist, bins, metadata", "def process_one_file(filename):\r\n with gzip.open(filename,\"rb\") as file:\r\n fileTailLengths = [0]*NUM_OF_HASH_FUNCTIONS\r\n for line in file: \r\n if chr(line[0])=='Q':\r\n #get the tail length for each hash function\r\n tailLengths = process_line(line[2:])\r\n #get the maximum tail length for each hash function\r\n for i in range(0,NUM_OF_HASH_FUNCTIONS):\r\n fileTailLengths[i] = max(fileTailLengths[i], tailLengths[i])\r\n return fileTailLengths", "def fread(f, n, dtype):\n if dtype is np.str:\n dt=np.uint8\n else:\n dt=dtype\n \n data_array=np.fromfile(f, dt, n)\n #data_array.shape=(n,1)\n return data_array", "def getwavelength(headers, data):\n\n # Sort data into arrays\n iw = np.where(headers == \"Wavelength\")\n wavelength = data[:,iw[1][0]]\n\n return wavelength", "def count_data(self):\n num_data = 0\n for in_file_name in self.file_names:\n h5_file = h5py.File( in_file_name, 'r' )\n X = h5_file[self.features_name]\n if hasattr(X, 'keys'):\n num_data += len(X[ list(X.keys())[0] ])\n else:\n num_data += len(X)\n h5_file.close()\n return num_data", "def getdata(filename, *ext, 
**extkeys):\n\n if 'header' in extkeys:\n _gethdr = extkeys['header']\n del extkeys['header']\n else:\n _gethdr = False\n\n hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)\n hdu = hdulist[_ext]\n _data = hdu.data\n if _data is None and isinstance(_ext, _Zero):\n try:\n hdu = hdulist[1]\n _data = hdu.data\n except IndexError:\n raise IndexError, 'No data in this HDU.'\n if _data is None:\n raise IndexError, 'No data in this HDU.'\n if _gethdr:\n _hdr = hdu.header\n hdulist.close()\n if _gethdr:\n return _data, _hdr\n else:\n return _data", "def _jpeg_content_length(p):\n\twith open(p, 'rb') as f:\n\t\tlast_byte = ''\n\t\theader_end_i = None\n\t\tfor i in itertools.count():\n\t\t\tcurrent_byte = f.read(1)\n\t\t\tif current_byte == b'':\n\t\t\t\tbreak\n\t\t\t# some files somehow contain multiple FF DA sequences, don't know what that means\n\t\t\tif header_end_i is None and last_byte == b'\\xff' and current_byte == b'\\xda':\n\t\t\t\theader_end_i = i\n\t\t\tlast_byte = current_byte\n\t\t# at this point, i is equal to the size of the file\n\t\treturn i - header_end_i - 2 # minus 2 because all JPEG files end in FF D0", "def test_robot_dataset_length():\n # Raw data\n path = str(Path(__file__).parents[1] / config['raw_robot_dataset'])\n data = dd.io.load(path)\n assert (len(data.keys()) == len(subjects))", "def read_tcspc_dat(fname):\n with open(fname) as f:\n # go through the first lines\n for i in range(8):\n f.readline()\n # get the steps\n steps = np.array([float(e) for e in f.readline().strip().split()])\n # dump next line\n f.readline()\n # load histogram data\n data = np.loadtxt(f)\n # return and ensure data has 2 dim\n return steps, data.reshape((-1, 1)) if data.ndim==1 else data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
flag = write_raw_mat(data, filename, data_format='f4', end='l') Write data to a file on the file system as binary data.
def f_write_raw_mat(data, filename, data_format='f4', end='l'):
    if not isinstance(data, np.ndarray):
        print("Error write_raw_mat: input should be np.array")
        return False
    f = open(filename, 'wb')
    if len(data_format) > 0:
        if end == 'l':
            data_format = '<' + data_format
        elif end == 'b':
            data_format = '>' + data_format
        else:
            data_format = '=' + data_format
        datatype = np.dtype(data_format)
        temp_data = data.astype(datatype)
    else:
        temp_data = data
    temp_data.tofile(f, '')
    f.close()
    return True
[ "def f_append_raw_mat(data, filename, data_format='f4', end='l'):\n if not isinstance(data, np.ndarray):\n print(\"Error write_raw_mat: input shoul be np.array\")\n return False\n f = open(filename,'ab')\n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f,'')\n f.close()\n return True", "def write_data(num, data):\n file_num = \"%05d\" % num\n filename = data_file_statistics + file_num + \".dat\"\n fh = open(filename, mode='w')\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n i = 0\n data_row = 0\n while i < 20:\n j = 0\n while j < 5:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n data_i = 0\n while data_i < 27:\n fh.write(\"%13.5f\" % float(data[data_row * 27 + data_i]))\n data_i += 1\n data_row += 1\n j = 0\n while j < 2:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n fh.write('\\n')\n i += 1\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n fh.close()", "def LSIWriter(filename, pixels_per_column, pixels_per_row, channels, \n numeric_type_indicator, apodization_type, \n remap_type, image_plane_indicator, rf_center_frequency, \n rf_bandwidth, dwell_angle, cone_angle, graze_angle, twist_angle, \n column_sample_spacing, row_sample_spacing, \n column_oversampling_factor, row_oversampling_factor, \n column_resolution, row_resolution,\n text_header, data): \n \n file = open(filename, 'wb')\n \n # Write Int32 Header Values\n file.write(np.int32(pixels_per_column))\n file.write(np.int32(pixels_per_row))\n file.write(np.int32(channels))\n file.write(np.int32(numeric_type_indicator))\n file.write(np.int32(apodization_type))\n file.write(np.int32(remap_type))\n file.write(np.int32(image_plane_indicator)) \n \n # Write Float32 Header Values\n file.write(np.float32(rf_center_frequency))\n file.write(np.float32(rf_bandwidth))\n file.write(np.float32(dwell_angle))\n file.write(np.float32(cone_angle))\n file.write(np.float32(graze_angle))\n file.write(np.float32(twist_angle))\n file.write(np.float32(column_sample_spacing)) \n file.write(np.float32(row_sample_spacing))\n file.write(np.float32(column_oversampling_factor))\n file.write(np.float32(row_oversampling_factor))\n file.write(np.float32(column_resolution))\n file.write(np.float32(row_resolution))\n \n file.write(bytes('\\0' * (200-file.tell()),'utf-8'))\n\n # Exactly 200 characters \n file.write(bytes(text_header[:200].ljust(200), 'utf-8'))\n \n if numeric_type_indicator == 1:\n file.write(np.float32(data))\n elif numeric_type_indicator == 2:\n file.write(np.complex64(data))\n else:\n err = 'Invalid \"numeric_type_indicator\". 
Valid range is 1 or 2'\n ValueError(err)\n \n file.close()", "def _write_matrix_data(self, matrix):\n return matrix.newbyteorder('>').tobytes()", "def write_raw_file(self, data: bytes) -> None:\n pass", "def encode_file(matrix, fp):\n fp.write(IdxEncoder().write(matrix))", "def binary_out(array,fnam,dt=np.dtype(np.float64),endianness='big'):\n arrayout = np.array(array,dtype=dt)\n if sys.byteorder != endianness:\n arrayout.byteswap(True)\n arrayout.tofile(fnam)", "def write_PETSc_mat_dense(A,file):\n try:\n f = open(file, \"wb\")\n except:\n print(\"IO error:\", sys.exc_info()[0],file)\n\n import struct\n import numpy\n header = numpy.array([1211216])\n dims = A.shape\n nx = numpy.array(dims[0])\n ny = numpy.array(dims[1])\n matrixFormat = numpy.array([-1])\n\n\n # print('header')\n # print(header)\n # print(\"dims\")\n # print(dims)\n # print(\"nnz\")\n # print (nnz)\n # print (\"nrow\")\n # print (nrow,nrow.shape)\n # print (\"colidx\")\n # print (colidx,colidx.shape)\n # print('val')\n # print(A.data)\n f = open(file, \"wb\")\n header.astype('>i4').tofile(f)\n nx.astype('>i4').tofile(f)\n ny.astype('>i4').tofile(f)\n matrixFormat.astype('>i4').tofile(f)\n A.astype('>f8').tofile(f)\n f.close()\n return 0", "def _write_data_shape(self, filename, data):\n assert len(filename) > 0, 'filename cannot be empty.'\n\n if(os.path.exists(filename)):\n os.remove(filename)\n\n shape = F.shape(data)\n str_data = ''\n f = open(filename, \"a\");\n for s in shape:\n str_data += str(s)\n str_data += '|'\n f.write(str_data)\n f.close()", "def save_sparse_matrix(data,fmt,filepath):\n if fmt == 'tsv':\n m = data.tocoo()\n with open(filepath,'w') as out:\n for u,i,v in izip(m.row,m.col,m.data):\n print >>out,'{0}\\t{1}\\t{2}'.format(u+1,i+1,v)\n elif fmt == 'csv':\n m = data.tocoo()\n with open(filepath,'w') as out:\n for u,i,v in izip(m.row,m.col,m.data):\n print >>out,'{0},{1},{2}'.format(u+1,i+1,v)\n elif fmt == 'mm':\n mmwrite(filepath,data)\n elif fmt == 'npz':\n savez(data.tocoo(),filepath)\n elif fmt == 'fsm':\n fast_sparse_matrix(data).save(filepath)\n else:\n raise ValueError('unknown output format: {0}'.format(fmt))", "def write_to_file(self, data):", "def writeShortComplex(fileName, data):\n out_file = open(fileName, 'wb')\n data.copy().view(np.float).astype('>i2').tofile(out_file)\n out_file.close()", "def write_binproto_image(data, filename):\n data = data.transpose((2, 0, 1))\n data = data.reshape((1, ) + data.shape)\n blob = caffe.io.array_to_blobproto(data).SerializeToString()\n with open(filename, 'wb') as f:\n f.write(blob)", "def writeBinary(*args, **kwargs):\n \n pass", "def write_fortran(f, array, dtype=\"I\", check=True):\n f.write(struct.pack(dtype, array.nbytes))\n array.tofile(f)\n f.write(struct.pack(dtype, array.nbytes))", "def save_data(self, matrix, file_name, header = \"\"):\r\n formatted_header = \"\"\r\n np.set_printoptions(suppress=True,\r\n formatter={'float_kind':'{:f}'.format})\r\n if(isinstance(header, list)):\r\n for i in range(len(header)):\r\n header_el = header[i]\r\n missing_spaces = self.check_for_length(header[i])\r\n formatted_header = formatted_header + header[i] + \" \"*missing_spaces \r\n else:\r\n formatted_header = header\r\n \r\n f = open(file_name, \"w\")\r\n f.write(formatted_header + os.linesep)\r\n missing_spaces = np.zeros(matrix.shape[0])\r\n for i in range(matrix.shape[1]): \r\n write_string = \"\"\r\n for j in range(matrix.shape[0]):\r\n missing_space = self.check_for_length(matrix[j,i])\r\n missing_spaces[j] = missing_space\r\n write_string = 
write_string + \"{:.12f}\".format(matrix[j,i])+\" \"*missing_space\r\n f.write(write_string + os.linesep)\r\n f.close()", "def write_binary_file(output_path, data):\n with open(output_path, \"wb\") as f:\n f.write(data)", "def f_write_htk(data, targetfile, \n sampPeriod=50000, sampKind=9, data_format='f4', end='l'):\n if data.ndim==1:\n nSamples, vDim = data.shape[0], 1\n else:\n nSamples, vDim = data.shape\n if data_format=='f4':\n sampSize = vDim * 4;\n else:\n sampSize = vDim * 8;\n \n f = open(targetfile,'wb')\n\n if len(data_format)>0:\n if end=='l':\n data_format1 = '<i4'\n data_format2 = '<i2'\n elif end=='b':\n data_format1 = '>i4'\n data_format2 = '>i2'\n else:\n data_format1 = '=i4'\n data_format2 = '=i2'\n \n temp_data = np.array([nSamples, sampPeriod], \n dtype=np.dtype(data_format))\n temp_data.tofile(f, '')\n \n temp_data = np.array([sampSize, sampKind], dtype=np.dtype(data_format2))\n temp_data.tofile(f, '')\n \n \n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f, '')\n f.close()\n return True", "def write_data(data_array, path, header=None,) :\n print(\"Writing data to file : %s\"%(path))\n numRows = len(data_array)\n print(\"Data array length : %d\"%(numRows))\n \n ## Convert each row of data to formatted string\n linestrings=[]\n for i in range(numRows) :\n strVals = [str(i) for i in data_array[i]]\n linestrings.append(\"\\t\".join(strVals)+\"\\n\")\n\n # Write to ascii file with optional header\n with open(path, \"w\") as f :\n if header != None :\n f.write(header+\"\\n\")\n \n for row in linestrings :\n f.write(row)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
flag = append_raw_mat(data, filename, data_format='f4', end='l') Append data to an existing file on the file system as binary data.
def f_append_raw_mat(data, filename, data_format='f4', end='l'):
    if not isinstance(data, np.ndarray):
        print("Error write_raw_mat: input should be np.array")
        return False
    f = open(filename, 'ab')
    if len(data_format) > 0:
        if end == 'l':
            data_format = '<' + data_format
        elif end == 'b':
            data_format = '>' + data_format
        else:
            data_format = '=' + data_format
        datatype = np.dtype(data_format)
        temp_data = data.astype(datatype)
    else:
        temp_data = data
    temp_data.tofile(f, '')
    f.close()
    return True
[ "def f_write_raw_mat(data, filename, data_format='f4', end='l'):\n if not isinstance(data, np.ndarray):\n print(\"Error write_raw_mat: input should be np.array\")\n return False\n f = open(filename,'wb')\n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f,'')\n f.close()\n return True", "def write_data(num, data):\n file_num = \"%05d\" % num\n filename = data_file_statistics + file_num + \".dat\"\n fh = open(filename, mode='w')\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n i = 0\n data_row = 0\n while i < 20:\n j = 0\n while j < 5:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n data_i = 0\n while data_i < 27:\n fh.write(\"%13.5f\" % float(data[data_row * 27 + data_i]))\n data_i += 1\n data_row += 1\n j = 0\n while j < 2:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n fh.write('\\n')\n i += 1\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n fh.close()", "def LSIWriter(filename, pixels_per_column, pixels_per_row, channels, \n numeric_type_indicator, apodization_type, \n remap_type, image_plane_indicator, rf_center_frequency, \n rf_bandwidth, dwell_angle, cone_angle, graze_angle, twist_angle, \n column_sample_spacing, row_sample_spacing, \n column_oversampling_factor, row_oversampling_factor, \n column_resolution, row_resolution,\n text_header, data): \n \n file = open(filename, 'wb')\n \n # Write Int32 Header Values\n file.write(np.int32(pixels_per_column))\n file.write(np.int32(pixels_per_row))\n file.write(np.int32(channels))\n file.write(np.int32(numeric_type_indicator))\n file.write(np.int32(apodization_type))\n file.write(np.int32(remap_type))\n file.write(np.int32(image_plane_indicator)) \n \n # Write Float32 Header Values\n file.write(np.float32(rf_center_frequency))\n file.write(np.float32(rf_bandwidth))\n file.write(np.float32(dwell_angle))\n file.write(np.float32(cone_angle))\n file.write(np.float32(graze_angle))\n file.write(np.float32(twist_angle))\n file.write(np.float32(column_sample_spacing)) \n file.write(np.float32(row_sample_spacing))\n file.write(np.float32(column_oversampling_factor))\n file.write(np.float32(row_oversampling_factor))\n file.write(np.float32(column_resolution))\n file.write(np.float32(row_resolution))\n \n file.write(bytes('\\0' * (200-file.tell()),'utf-8'))\n\n # Exactly 200 characters \n file.write(bytes(text_header[:200].ljust(200), 'utf-8'))\n \n if numeric_type_indicator == 1:\n file.write(np.float32(data))\n elif numeric_type_indicator == 2:\n file.write(np.complex64(data))\n else:\n err = 'Invalid \"numeric_type_indicator\". 
Valid range is 1 or 2'\n ValueError(err)\n \n file.close()", "def write_raw_file(self, data: bytes) -> None:\n pass", "def _write_data_shape(self, filename, data):\n assert len(filename) > 0, 'filename cannot be empty.'\n\n if(os.path.exists(filename)):\n os.remove(filename)\n\n shape = F.shape(data)\n str_data = ''\n f = open(filename, \"a\");\n for s in shape:\n str_data += str(s)\n str_data += '|'\n f.write(str_data)\n f.close()", "def write_to_file(self, data):", "def encode_file(matrix, fp):\n fp.write(IdxEncoder().write(matrix))", "def save_data(self, matrix, file_name, header = \"\"):\r\n formatted_header = \"\"\r\n np.set_printoptions(suppress=True,\r\n formatter={'float_kind':'{:f}'.format})\r\n if(isinstance(header, list)):\r\n for i in range(len(header)):\r\n header_el = header[i]\r\n missing_spaces = self.check_for_length(header[i])\r\n formatted_header = formatted_header + header[i] + \" \"*missing_spaces \r\n else:\r\n formatted_header = header\r\n \r\n f = open(file_name, \"w\")\r\n f.write(formatted_header + os.linesep)\r\n missing_spaces = np.zeros(matrix.shape[0])\r\n for i in range(matrix.shape[1]): \r\n write_string = \"\"\r\n for j in range(matrix.shape[0]):\r\n missing_space = self.check_for_length(matrix[j,i])\r\n missing_spaces[j] = missing_space\r\n write_string = write_string + \"{:.12f}\".format(matrix[j,i])+\" \"*missing_space\r\n f.write(write_string + os.linesep)\r\n f.close()", "def write_magic_file(self, custom_name=None, dir_path=\".\", append=False):\n # *** maybe add some logical order to the column names, here?\n # *** i.e., alphabetical... see grid_frame3.GridBuilder.make_grid\n df = self.df\n # if indexing column was put in, remove it\n if \"num\" in self.df.columns:\n self.df.drop(\"num\", axis=1, inplace=True)\n dir_path = os.path.realpath(dir_path)\n if custom_name:\n fname = os.path.join(dir_path, custom_name)\n else:\n fname = os.path.join(dir_path, self.dtype + \".txt\")\n # add to existing file\n if append:\n print '-I- appending {} data to {}'.format(self.dtype, fname)\n mode = \"a\"\n # overwrite existing file\n elif os.path.exists(fname):\n print '-I- overwriting {}'.format(fname)\n mode = \"w\"\n # or create new file\n else:\n print '-I- writing {} data to {}'.format(self.dtype, fname)\n mode = \"w\"\n f = open(fname, mode)\n f.write('tab\\t{}\\n'.format(self.dtype))\n df.to_csv(f, sep=\"\\t\", header=True, index=False)\n f.close()", "def appendToFile(path, data):", "def _write_matrix_data(self, matrix):\n return matrix.newbyteorder('>').tobytes()", "def write_data(data_array, path, header=None,) :\n print(\"Writing data to file : %s\"%(path))\n numRows = len(data_array)\n print(\"Data array length : %d\"%(numRows))\n \n ## Convert each row of data to formatted string\n linestrings=[]\n for i in range(numRows) :\n strVals = [str(i) for i in data_array[i]]\n linestrings.append(\"\\t\".join(strVals)+\"\\n\")\n\n # Write to ascii file with optional header\n with open(path, \"w\") as f :\n if header != None :\n f.write(header+\"\\n\")\n \n for row in linestrings :\n f.write(row)", "def Output2File(data_array, filebase, format, hdr=None, shape=None):\n # used by 'AIDA_Functions.py'\n \n # below is old\n #if shape is None:\n #\n # shape = data_array.shape\n \n ### EHom (20130625): adding line to shape data_array according to shape input parameter\n ### Should have been here before\n if (shape != None):\n data_array.shape = shape\n \n import matplotlib.pyplot as plt\n #plt.figure()\n #plt.imshow(data_array)\n #plt.title(data_array[0,0])\n 
#plt.show()\n \n if format == 'm':\n\n Mrc.save(data_array, filebase + '.mrc', ifExists=\"overwrite\")\n \n # below is old way - Mrc.bindArr no longer exists in Priithon\n #rs = ''\n #\n #for i in shape:\n # \n # rs += '%d ' %i\n #\n #dtype = data_array.dtype\n #\n #temp = Mrc.bindArr(filebase + '.mrc', data_array.astype(np.float32))\n ## can only write out as single precision\n #fileheader = temp.Mrc.hdrArray[0]\n #fileheader.setfield('NumTitles',1)\n #fileheader.field('title')[0] = 'Shape: ' + rs\n #temp.Mrc.close()\n ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n \n elif format == 'f':\n\n if os.path.exists(filebase + '.fits') == 1:\n\n os.remove(filebase + '.fits')\n\n # Clement: using astropy.io.fits now\n \n fits_file = iofits.HDUList()\n datahdu = PrimaryHDU()\n datahdu.data = data_array\n \n \n iofits.append(filebase + '.fits',data_array,header=hdr)\n \n elif format == 't':\n if os.path.exists(filebase + '.tiff') == 1:\n\n os.remove(filebase + '.tiff')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tiff')\n \n elif format == 't2':\n if os.path.exists(filebase + '.tif') == 1:\n\n os.remove(filebase + '.tif')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tif')\n \n# Clement: Old version using pyfits (deprecated)\n# fits_file = pyfits.HDUList()\n# datahdu = pyfits.PrimaryHDU()\n# datahdu.data = data_array\n# \n# ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n# #if type(hdr) is not types.NoneType:\n# #\n# # datahdu.header = hdr\n# # \n# # print hdr\n# \n# # Provide header info from the original fits file.\n# \n# \n# fits_file.append(datahdu)\n# fits_file.writeto(filebase + '.fits')\n \n# else: # format must be .tiff\n# \n# #!!!! TENTATIVE !!!!\n# # make sure orientation of TIFF file matches convention\n# if len(data_array.shape) == 2:\n# \n# U.saveImg(data_array[...,::-1,...], filebase + \".tiff\")\n# elif len(data_array.shape) == 3:\n# \n# U.saveImg_seq(data_array[...,::-1,...], filebase + \".tiff\")\n# else:\n# \n# message = \"\\n'data_array' shape is not 2 or 3! Cannot write \" + \\\n# \"out TIFF file!\"\n# raise ValueError, message\n\n ### EHom (20130616): also output results (if 2D) as an 8-bit JPEG files using PIL\n ### In the division of 255, I hack the addition of a small value to avoid \n ### a divide by zero in a true_divide call\n if len(data_array.shape) == 2:\n\n min = data_array.min()\n max = data_array.max()\n #print data_array.min()\n #print data_array.max()\n #print data_array.mean()\n rescaled = np.where(data_array > min, data_array-min, 0.)\n if ((max - min) == 0):\n message = \"\\nMax Min problem in outputting array! 
Cannot write JPEG file\\n\"\n print(message)\n else:\n rescaled *= (255.0 / (max - min))\n # Clement: we don't need to save the jpeg\n # im = ImageOps.flip(Image.fromarray(rescaled.astype(np.uint8)))\n # rescale and flip vertically to properly register image with FITS output\n # im.save(filebase + '.jpeg')", "def write_binproto_image(data, filename):\n data = data.transpose((2, 0, 1))\n data = data.reshape((1, ) + data.shape)\n blob = caffe.io.array_to_blobproto(data).SerializeToString()\n with open(filename, 'wb') as f:\n f.write(blob)", "def writeVec(self, filename, mode='w'):\n # Check writing mode\n if not mode in 'wa':\n raise ValueError(\"Mode must be appending 'a' or writing 'w' \")\n # writing header/pointer file if not present and not append mode\n if not (os.path.isfile(filename) and mode in 'a'):\n binfile = sep.datapath + filename.split('/')[-1] + '@'\n # Copying SEPlib header file\n copyfile(self.vecfile, filename)\n # Substituting binary file\n with open(filename, 'a') as fid:\n fid.write(\"\\nin='%s'\\n\" % binfile)\n fid.close()\n else:\n binfile = sep.get_binary(filename)\n if mode in 'a':\n axes = sep.get_axes(filename)\n # Number of vectors already present in the file\n if self.shape == (1,):\n n_vec = axes[0][0]\n append_dim = self.ndim\n else:\n n_vec = axes[self.ndim][0]\n append_dim = self.ndim + 1\n with open(filename, mode) as fid:\n fid.write(\"n%s=%s o%s=0.0 d%s=1.0 \\n\" % (append_dim, n_vec + 1, append_dim, append_dim))\n fid.close()\n # Writing or Copying binary file\n if not (os.path.isfile(binfile) and mode in 'a'):\n copyfile(self.binfile, binfile)\n else:\n # Writing file if\n with open(binfile, mode + 'b') as fid, open(self.binfile, 'rb') as fid_toread:\n while True:\n data = fid_toread.read(BUF_SIZE)\n if not data:\n break\n fid.write(data)\n fid.close()\n fid_toread.close()\n return", "def binary_out(array,fnam,dt=np.dtype(np.float64),endianness='big'):\n arrayout = np.array(array,dtype=dt)\n if sys.byteorder != endianness:\n arrayout.byteswap(True)\n arrayout.tofile(fnam)", "def test_write_append():\n data = random_data('uint8', (21, 31))\n with TempFileName('append') as fname:\n with TiffWriter(fname) as tif:\n pass\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 0\n assert__str__(tif)\n\n with TiffWriter(fname, append=True) as tif:\n tif.save(data)\n with TiffFile(fname) as tif:\n assert len(tif.series) == 1\n assert len(tif.pages) == 1\n page = tif.pages[0]\n assert page.imagewidth == 31\n assert page.imagelength == 21\n assert__str__(tif)\n\n with TiffWriter(fname, append=True) as tif:\n tif.save(data)\n tif.save(data)\n with TiffFile(fname) as tif:\n assert len(tif.series) == 2\n assert len(tif.pages) == 3\n page = tif.pages[0]\n assert page.imagewidth == 31\n assert page.imagelength == 21\n assert_array_equal(tif.asarray(series=1)[1], data)\n assert__str__(tif)\n\n assert_valid(fname)", "def f_write_htk(data, targetfile, \n sampPeriod=50000, sampKind=9, data_format='f4', end='l'):\n if data.ndim==1:\n nSamples, vDim = data.shape[0], 1\n else:\n nSamples, vDim = data.shape\n if data_format=='f4':\n sampSize = vDim * 4;\n else:\n sampSize = vDim * 8;\n \n f = open(targetfile,'wb')\n\n if len(data_format)>0:\n if end=='l':\n data_format1 = '<i4'\n data_format2 = '<i2'\n elif end=='b':\n data_format1 = '>i4'\n data_format2 = '>i2'\n else:\n data_format1 = '=i4'\n data_format2 = '=i2'\n \n temp_data = np.array([nSamples, sampPeriod], \n dtype=np.dtype(data_format))\n temp_data.tofile(f, '')\n \n temp_data = np.array([sampSize, sampKind], 
dtype=np.dtype(data_format2))\n temp_data.tofile(f, '')\n \n \n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f, '')\n f.close()\n return True", "def write_fortran(f, array, dtype=\"I\", check=True):\n f.write(struct.pack(dtype, array.nbytes))\n array.tofile(f)\n f.write(struct.pack(dtype, array.nbytes))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write_htk(data, targetfile, sampPeriod=50000, sampKind=9, data_format='f4', end='l') Write data to a file in HTK-compatible format.
def f_write_htk(data, targetfile,
                sampPeriod=50000, sampKind=9, data_format='f4', end='l'):
    if data.ndim == 1:
        nSamples, vDim = data.shape[0], 1
    else:
        nSamples, vDim = data.shape
    if data_format == 'f4':
        sampSize = vDim * 4
    else:
        sampSize = vDim * 8

    f = open(targetfile, 'wb')

    if len(data_format) > 0:
        if end == 'l':
            data_format1 = '<i4'
            data_format2 = '<i2'
        elif end == 'b':
            data_format1 = '>i4'
            data_format2 = '>i2'
        else:
            data_format1 = '=i4'
            data_format2 = '=i2'

    # HTK header: nSamples and sampPeriod as int32, sampSize and sampKind as int16
    temp_data = np.array([nSamples, sampPeriod], dtype=np.dtype(data_format1))
    temp_data.tofile(f, '')

    temp_data = np.array([sampSize, sampKind], dtype=np.dtype(data_format2))
    temp_data.tofile(f, '')

    if len(data_format) > 0:
        if end == 'l':
            data_format = '<' + data_format
        elif end == 'b':
            data_format = '>' + data_format
        else:
            data_format = '=' + data_format
        datatype = np.dtype(data_format)
        temp_data = data.astype(datatype)
    else:
        temp_data = data
    temp_data.tofile(f, '')
    f.close()
    return True
[ "def f_read_htk(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n \n \"\"\"if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n \"\"\" \n if 'f' in data_format:\n sample_size = int(head_info['SampleSize'][0]/4)\n else:\n print(\"Error in read_htk: input should be float32\")\n return False\n \n datatype = np.dtype((data_format,(sample_size,)))\n data = np.fromfile(f,dtype=datatype)\n f.close()\n return data", "def writeSegy(filename, Data, dt=1000, STHin={}, SHin={}):\n\n #printverbose(\"writeSegy : Trying to write \" + filename, 0)\n\n N = Data.shape\n ns = N[0]\n ntraces = N[1]\n # print(ntraces)\n # print(ns)\n\n if not len(SHin):\n SH = getDefaultSegyHeader(ntraces, ns, dt)\n else:\n SH = SHin\n if not len(STHin):\n STH = getDefaultSegyTraceHeaders(ntraces, ns, dt)\n else: \n STH = STHin \n \n\n writeSegyStructure(filename, Data, SH, STH)", "def f_read_htk_length(filename, data_format='f4', end='l'):\n if end=='l':\n data_format = '<'+data_format\n data_formatInt4 = '<i4'\n data_formatInt2 = '<i2'\n elif end=='b':\n data_format = '>'+data_format\n data_formatInt4 = '>i4'\n data_formatInt2 = '>i2'\n else:\n data_format = '='+data_format\n data_formatInt4 = '=i4'\n data_formatInt2 = '=i2'\n\n head_type = np.dtype([('nSample',data_formatInt4), \n ('Period',data_formatInt4),\n ('SampleSize',data_formatInt2), \n ('kind',data_formatInt2)])\n f = open(filename,'rb')\n head_info = np.fromfile(f,dtype=head_type,count=1)\n f.close()\n \n sample_size = int(head_info['SampleSize'][0]/4)\n return sample_size", "def _write(self, session, openFile, replaceParamFile):\n ## TODO: Ensure Other HMET Formats are supported\n hmetRecords = self.hmetRecords\n\n for record in hmetRecords:\n openFile.write('%s\\t%s\\t%s\\t%s\\t%.3f\\t%s\\t%s\\t%s\\t%s\\t%.2f\\t%.2f\\n' % (\n record.hmetDateTime.year,\n record.hmetDateTime.month,\n record.hmetDateTime.day,\n record.hmetDateTime.hour,\n record.barometricPress,\n record.relHumidity,\n record.totalSkyCover,\n record.windSpeed,\n record.dryBulbTemp,\n record.directRad,\n record.globalRad))", "def write_data(num, data):\n file_num = \"%05d\" % num\n filename = data_file_statistics + file_num + \".dat\"\n fh = open(filename, mode='w')\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n i = 0\n data_row = 0\n while i < 20:\n j = 0\n while j < 5:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n data_i = 0\n while data_i < 27:\n fh.write(\"%13.5f\" % float(data[data_row * 27 + data_i]))\n data_i += 1\n data_row += 1\n j = 0\n while j < 2:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n fh.write('\\n')\n i += 1\n i = 0\n while i < 5:\n j = 0\n while j < 34:\n fh.write(\"%13.5f\" % 0.0)\n j += 1\n i += 1\n fh.write('\\n')\n fh.close()", "def Write2File(fileNum, data, time, chNum):\n f = open(\"Data%s.txt\" % fileNum, 'w+')\n for row in range(len(data) / chNum):\n for col in range(chNum):\n # f.write(\"%i %f \" % (data[row*chNum + col], 
time[row*chNum + col]))s\n f.write(\"%s \" % (data[row * chNum + col]))\n f.write(\"\\n\")\n f.close()", "def write_to_file(self, data):", "def writeSegyStructure(filename, Data, SH, STH, endian='>'): # modified by A Squelch\n\n #printverbose(\"writeSegyStructure : Trying to write \" + filename, 0)\n\n f = open(filename, 'wb')\n\n # VERBOSE INF\n revision = SH[\"SegyFormatRevisionNumber\"]\n dsf = SH[\"DataSampleFormat\"]\n if (revision == 100):\n revision = 1\n if (revision == 256): # added by A Squelch\n revision = 1\n\n # try: # block added by A Squelch\n # DataDescr = SH_def[\"DataSampleFormat\"][\"descr\"][str(revision)][str(dsf)]\n # except KeyError:\n # print(\"\")\n # print(\" An error has ocurred interpreting a SEGY binary header key\")\n # print(\" Please check the Endian setting for this file: \", SH[\"filename\"])\n # sys.exit()\n\n #printverbose(\"writeSegyStructure : SEG-Y revision = \" + str(revision), 1)\n #printverbose(\"writeSegyStructure : DataSampleFormat=\" + str(dsf) + \"(\" + DataDescr + \")\", 1)\n\n # WRITE SEGY HEADER\n\n for key in SH_def.keys():\n pos = SH_def[key][\"pos\"]\n format = SH_def[key][\"type\"]\n value = SH[key]\n\n # SegyHeader[key],index = putValue(value,f,pos,format,endian);\n putValue(value, f, pos, format, endian)\n\n txt = str(pos) + \" \" + str(format) + \" Reading \" + key + \"=\" + str(value)\n # +\"=\"+str(SegyHeader[key])\n # printverbose(txt,-1)\n\n # SEGY TRACES\n\n ctype = SH_def['DataSampleFormat']['datatype'][revision][dsf]\n bps = SH_def['DataSampleFormat']['bps'][revision][dsf]\n\n sizeT = 240 + SH['ns'] * bps\n\n for itrace in range(SH['ntraces']):\n index = 3600 + itrace * sizeT\n #printverbose('Writing Trace #' + str(itrace + 1) + '/' + str(SH['ntraces']), 10)\n # WRITE SEGY TRACE HEADER\n for key in STH_def.keys():\n pos = index + STH_def[key][\"pos\"]\n format = STH_def[key][\"type\"]\n value = STH[key][itrace]\n txt = str(pos) + \" \" + str(format) + \" Writing \" + key + \"=\" + str(value)\n\n #printverbose(txt, 40)\n putValue(value, f, pos, format, endian)\n\n # Write Data\n cformat = endian + ctype\n for s in range(SH['ns']):\n strVal = struct.pack(cformat, Data[s, itrace])\n f.seek(index + 240 + s * struct.calcsize(cformat))\n f.write(strVal)\n\n f.close\n\n # return segybuffer", "def to_hdf(self, filename, **kwargs):\n from .output import to_hdf\n\n to_hdf(self, filename, **kwargs)", "def write(self,data):\n if not os.path.exists(os.path.dirname(self.outfilename)):\n os.makedirs(os.path.dirname(self.outfilename))\n\n if os.path.exists(self.outfilename):\n self.outfile = h5py.File(self.outfilename,'a')\n else:\n self.outfile = h5py.File(self.outfilename,'w')\n\n # Set permissions and group\n if self.set_permissions:\n os.chmod(self.outfilename,0o664)\n shutil.chown(self.outfilename, group=self.permissions_group)\n\n if self.level2 in self.outfile:\n del self.outfile[self.level2]\n lvl2 = self.outfile.create_group(self.level2)\n\n tod_dset = lvl2.create_dataset('averaged_tod',data=self.avg_tod, dtype=self.avg_tod.dtype)\n tod_dset.attrs['Unit'] = 'K'\n tod_dset.attrs['Calibration'] = '{self.cal_mode}:{self.cal_prefix}'\n\n freq_dset = lvl2.create_dataset('frequency',data=self.avg_frequency, dtype=self.avg_frequency.dtype)\n\n # Link the Level1 data\n data_filename = data['level1'].file.filename\n fname = data['level1'].file.filename.split('/')[-1]\n vane_file = data['level2/Vane'].file.filename\n\n # Copy over the statistics\n if 'Statistics' in lvl2:\n del lvl2['Statistics']\n grp = lvl2.create_group('Statistics')\n for 
k,v in data['level2/Statistics'].items():\n if isinstance(v,h5py.Group):\n grp2 = grp.create_group(k)\n for k1,v1 in v.items():\n grp2.create_dataset(k1,data=v1,dtype=v1.dtype)\n else:\n grp.create_dataset(k,data=v,dtype=v.dtype)\n\n\n data.close()\n if 'level1' in self.outfile:\n del self.outfile['level1']\n self.outfile['level1'] = h5py.ExternalLink(data_filename,'/')\n lvl2.attrs['version'] = __level2_version__\n\n # Add version info\n lvl2.attrs['pipeline-version'] = comancpipeline.__version__\n\n # Link the Level1 data\n if 'Vane' in lvl2:\n del lvl2['Vane']\n lvl2['Vane'] = h5py.ExternalLink('{}'.format(vane_file),'/')", "def write_hdf5(data, filename):\n import h5py as hp\n import numpy as np\n hfile = hp.File(filename, 'w')\n typ = type(data)\n if typ == dict:\n for k in data.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# hfile[k] = data[k]\n exec(\"hfile['\" + k + \"'] = data['\" + k + \"']\")\n elif typ == np.ndarray:\n hfile['data'] = data\n hfile.close()", "def write_data(path, data: dict):\n # todo: Pass complete time series info (yunit etc.) if this is stored on TimeSeries object\n with h5py.File(path, \"w\") as f:\n for name, tx in data.items():\n t, x = tx\n start = t[0]\n delta = t[1] - t[0]\n dset = f.create_dataset(name, data=x)\n dset.attrs[\"name\"] = os.path.basename(name)\n dset.attrs[\"start\"] = start\n dset.attrs[\"delta\"] = delta\n dset.attrs[\"xunit\"] = \"s\"\n dset.attrs[\"yunit\"] = \"\"", "def writeFits(sOutFileName_p, data_p,header=None):\n data_p=np.rollaxis(data_p,2,0)\n if header==None:\n afits.writeto(sOutFileName_p,data_p,clobber=True)\n else:\n hdu=afits.PrimaryHDU(data=data_p,header=header,uint=True)\n hduList=afits.HDUList([hdu])\n hduList.writeto(sOutFileName_p,clobber=True)", "def write_hyd(self,fn=None):\n # currently the segment names here are out of sync with \n # the names used by write_parameters.\n # this is relevant for salinity-file, vert-diffusion-file\n # maybe surfaces-file, depths-file.\n # for example, surfaces file is written as tbd-SURF.seg\n # but below we call it com-tbd.srf\n # maybe easiest to just change the code below since it's\n # already arbitrary\n fn=fn or os.path.join( self.scenario.base_path,\n self.fn_base+\".hyd\")\n if os.path.exists(fn):\n if self.overwrite:\n os.unlink(fn)\n else:\n self.log.warning(\"hyd file %s already exists. 
Not overwriting!\"%fn)\n return\n \n name=self.scenario.name\n\n dfmt=\"%Y%m%d%H%M%S\"\n\n scu=self.scenario.scu\n\n # If symlinking, we want to report the full time period.\n if self.enable_write_symlink:\n time_start,time_stop,timedelta=self.timeline_data()\n else:\n time_start,time_stop,timedelta = self.timeline_scen()\n \n timestep = timedelta_to_waq_timestep(timedelta)\n\n self.infer_2d_elements()\n n_layers=1+self.seg_k.max()\n\n # New code - maybe not right at all.\n # This code is also duplicated across several of the classes in this file.\n # crying out for refactoring.\n if 'temp' in self.parameters():\n temp_file=\"'%s-temp.seg'\"%name\n else:\n temp_file='none'\n \n if 'tau' in self.parameters():\n tau_file=\"'%s-tau.seg'\"%name\n else:\n tau_file='none'\n \n lines=[\n \"file-created-by SFEI, waq_scenario.py\",\n \"file-creation-date %s\"%( datetime.datetime.utcnow().strftime('%H:%M:%S, %d-%m-%Y') ),\n \"task full-coupling\",\n \"geometry unstructured\",\n \"horizontal-aggregation no\",\n \"reference-time '%s'\"%( self.time0.strftime(dfmt) ),\n \"hydrodynamic-start-time '%s'\"%( time_start.strftime(dfmt) ),\n \"hydrodynamic-stop-time '%s'\"%( time_stop.strftime(dfmt) ),\n \"hydrodynamic-timestep '%s'\"%timestep, \n \"conversion-ref-time '%s'\"%( self.time0.strftime(dfmt) ),\n \"conversion-start-time '%s'\"%( time_start.strftime(dfmt) ),\n \"conversion-stop-time '%s'\"%( time_stop.strftime(dfmt) ),\n \"conversion-timestep '%s'\"%timestep, \n \"grid-cells-first-direction %d\"%self.n_2d_elements,\n \"grid-cells-second-direction 0\",\n \"number-hydrodynamic-layers %s\"%( n_layers ),\n \"number-horizontal-exchanges %d\"%( self.n_exch_x ),\n \"number-vertical-exchanges %d\"%( self.n_exch_z ),\n # little white lie. this is the number in the top layer.\n # and no support for water-quality being different than hydrodynamic\n \"number-water-quality-segments-per-layer %d\"%( self.n_2d_elements),\n \"number-water-quality-layers %s\"%( n_layers ),\n \"hydrodynamic-file '%s'\"%self.fn_base,\n \"aggregation-file none\",\n # filename handling not as elegant as it could be..\n # e.g. 
self.vol_filename should probably be self.vol_filepath, then\n # here we could reference the filename relative to the hyd file\n \"grid-indices-file '%s.bnd'\"%self.fn_base,# lies, damn lies\n \"boundaries-file '%s.bnd'\"%self.fn_base, # this one might be true.\n \"grid-coordinates-file '%s'\"%self.flowgeom_filename,\n \"attributes-file '%s.atr'\"%self.fn_base,\n \"volumes-file '%s.vol'\"%self.fn_base,\n \"areas-file '%s.are'\"%self.fn_base,\n \"flows-file '%s.flo'\"%self.fn_base,\n \"pointers-file '%s.poi'\"%self.fn_base,\n \"lengths-file '%s.len'\"%self.fn_base,\n \"salinity-file '%s-salinity.seg'\"%name,\n \"temperature-file %s\"%temp_file,\n \"vert-diffusion-file '%s-vertdisper.seg'\"%name,\n # not a segment function!\n \"surfaces-file '%s'\"%self.surf_filename,\n \"shear-stresses-file %s\"%tau_file,\n \"hydrodynamic-layers\",\n \"\\n\".join( [\"%.5f\"%(1./n_layers)] * n_layers ),\n \"end-hydrodynamic-layers\",\n \"water-quality-layers \",\n \"\\n\".join( [\"1.000\"] * n_layers ),\n \"end-water-quality-layers\"]\n txt=\"\\n\".join(lines)\n with open(fn,'wt') as fp:\n fp.write(txt)", "def Output2File(data_array, filebase, format, hdr=None, shape=None):\n # used by 'AIDA_Functions.py'\n \n # below is old\n #if shape is None:\n #\n # shape = data_array.shape\n \n ### EHom (20130625): adding line to shape data_array according to shape input parameter\n ### Should have been here before\n if (shape != None):\n data_array.shape = shape\n \n import matplotlib.pyplot as plt\n #plt.figure()\n #plt.imshow(data_array)\n #plt.title(data_array[0,0])\n #plt.show()\n \n if format == 'm':\n\n Mrc.save(data_array, filebase + '.mrc', ifExists=\"overwrite\")\n \n # below is old way - Mrc.bindArr no longer exists in Priithon\n #rs = ''\n #\n #for i in shape:\n # \n # rs += '%d ' %i\n #\n #dtype = data_array.dtype\n #\n #temp = Mrc.bindArr(filebase + '.mrc', data_array.astype(np.float32))\n ## can only write out as single precision\n #fileheader = temp.Mrc.hdrArray[0]\n #fileheader.setfield('NumTitles',1)\n #fileheader.field('title')[0] = 'Shape: ' + rs\n #temp.Mrc.close()\n ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n \n elif format == 'f':\n\n if os.path.exists(filebase + '.fits') == 1:\n\n os.remove(filebase + '.fits')\n\n # Clement: using astropy.io.fits now\n \n fits_file = iofits.HDUList()\n datahdu = PrimaryHDU()\n datahdu.data = data_array\n \n \n iofits.append(filebase + '.fits',data_array,header=hdr)\n \n elif format == 't':\n if os.path.exists(filebase + '.tiff') == 1:\n\n os.remove(filebase + '.tiff')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tiff')\n \n elif format == 't2':\n if os.path.exists(filebase + '.tif') == 1:\n\n os.remove(filebase + '.tif')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tif')\n \n# Clement: Old version using pyfits (deprecated)\n# fits_file = pyfits.HDUList()\n# datahdu = pyfits.PrimaryHDU()\n# datahdu.data = data_array\n# \n# ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n# #if type(hdr) is not types.NoneType:\n# #\n# # datahdu.header = hdr\n# # \n# # print hdr\n# \n# # Provide header info from the original fits file.\n# \n# \n# fits_file.append(datahdu)\n# fits_file.writeto(filebase + '.fits')\n \n# else: # format must be .tiff\n# \n# #!!!! 
TENTATIVE !!!!\n# # make sure orientation of TIFF file matches convention\n# if len(data_array.shape) == 2:\n# \n# U.saveImg(data_array[...,::-1,...], filebase + \".tiff\")\n# elif len(data_array.shape) == 3:\n# \n# U.saveImg_seq(data_array[...,::-1,...], filebase + \".tiff\")\n# else:\n# \n# message = \"\\n'data_array' shape is not 2 or 3! Cannot write \" + \\\n# \"out TIFF file!\"\n# raise ValueError, message\n\n ### EHom (20130616): also output results (if 2D) as an 8-bit JPEG files using PIL\n ### In the division of 255, I hack the addition of a small value to avoid \n ### a divide by zero in a true_divide call\n if len(data_array.shape) == 2:\n\n min = data_array.min()\n max = data_array.max()\n #print data_array.min()\n #print data_array.max()\n #print data_array.mean()\n rescaled = np.where(data_array > min, data_array-min, 0.)\n if ((max - min) == 0):\n message = \"\\nMax Min problem in outputting array! Cannot write JPEG file\\n\"\n print(message)\n else:\n rescaled *= (255.0 / (max - min))\n # Clement: we don't need to save the jpeg\n # im = ImageOps.flip(Image.fromarray(rescaled.astype(np.uint8)))\n # rescale and flip vertically to properly register image with FITS output\n # im.save(filebase + '.jpeg')", "def writeOntargetAmpliconFile(outType, batchId, ampLen, tm, ofh, minSpec=0, minFusi=0):\n inSeq, db, pamPat, position, extSeq = readBatchParams(batchId)\n batchBase = join(batchDir, batchId)\n otBedFname = batchBase+\".bed\"\n otMatches = parseOfftargets(otBedFname)\n\n startDict, endSet = findAllPams(inSeq, pamPat)\n pamSeqs = list(flankSeqIter(inSeq, startDict, len(pamPat), True))\n\n allEffScores = readEffScores(batchId)\n guideData, guideScores, hasNotFound, pamIdToSeq = mergeGuideInfo(inSeq, startDict, pamPat, otMatches, position, allEffScores, sortBy=\"pos\")\n\n if outType==\"primers\":\n headers = [\"#guideId\", \"forwardPrimer\", \"leftPrimerTm\", \"revPrimer\", \"revPrimerTm\", \"ampliconSequence\", \"guideSequence\"]\n else:\n headers = [\"#guideId\", \"ampliconSequence\", \"guideSequence\"]\n\n ofh.write(\"\\t\".join(headers))\n ofh.write(\"\\n\")\n \n #for pamId, pamStart, guideStart, strand, guideSeq, pamSeq, pamPlusSeq in pamSeqs:\n for guideScore, guideCfdScore, effScores, startPos, guideStart, strand, pamId, \\\n guideSeq, pamSeq, otData, otDesc, last12Desc, mutEnzymes, \\\n ontargetDesc, subOptMatchCount in guideData:\n\n if guideScore < minSpec:\n continue\n if effScores[\"fusi\"] < minFusi:\n continue\n\n chrom, start, end, strand, gene, isUnique = findOntargetPos(otMatches, pamId, position)\n effScores = allEffScores[pamId]\n\n note = \"\"\n if not isUnique:\n note = \"warning: guide has no unique match in genome\"\n\n lSeq, lTm, lPos, rSeq, rTm, rPos, targetSeq, ampRange, flankSeq = \\\n designPrimer(db, chrom, start, end, strand, 0, batchId, ampLen, tm)\n\n pamName = intToExtPamId(pamId)\n if outType==\"primers\":\n row = [pamName, lSeq, lTm, rSeq, rTm, targetSeq, guideSeq]\n else:\n row = [pamName, targetSeq, guideSeq]\n\n row = [str(x) for x in row]\n ofh.write(\"\\t\".join(row))\n ofh.write(\"\\n\")", "def write_defs(k,t):\n print \"Writing.\"\n sleep(.5)\n output = open(t, 'w')\n for k,v in sorted(k.iteritems(), key=lambda (k,v): v['order']):\n output.write(k.encode('utf-8'))\n output.write('/' + v['definition'] )\n output.write('\\n')\n output.close()\n print 'お待たせいたしました。'", "def write(self, file_name) :\n\n # Add the data\n Col = pyfits.Column(name='DATA', format=self.data_format, \n array=self.data)\n columns = [Col,]\n \n # Add all the other 
stored fields.\n for field_name in self.field.iterkeys() :\n Col = pyfits.Column(name=field_name,\n format=self.formats[field_name],\n array=self.field[field_name])\n columns.append(Col)\n coldefs = pyfits.ColDefs(columns)\n # Creat fits header data units, one for the table and the mandatory\n # primary.\n tbhdu = pyfits.new_table(coldefs)\n prihdu = pyfits.PrimaryHDU()\n # Add the write history.\n fname_abbr = ku.abbreviate_file_path(file_name)\n self.history.add('Written to file.', ('File name: ' + fname_abbr,))\n # Add the history to the header.\n bf.write_history_header(prihdu.header, self.history)\n\n # Combine the HDUs and write to file.\n hdulist = pyfits.HDUList([prihdu, tbhdu])\n hdulist.writeto(file_name, clobber=True)\n if self.feedback > 0 :\n print 'Wrote data to file: ' + fname_abbr", "def training_file(Inputs, Outputs, Ins, Outs, dt):\r\n header = \"%eval_id interface time \"\r\n for In in Ins:\r\n header+= In +' '\r\n for Out in Outs:\r\n header+= Out + \" \"\r\n training_data = ''\r\n for i in range(len(Outputs[0] ) -1):\r\n training_data += str(i+1) + ' NO_ID ' + str(dt*(i+1)) + ' '\r\n for j in range(len(Inputs)):\r\n training_data += str(Inputs[j, 0]) + ' '\r\n for k in range(len(Outputs)):\r\n training_data += str(Outputs[ k ,i+1])+ ' '\r\n training_data += '\\n'\r\n training_file = header + '\\n' + training_data\r\n with open('./training_data','w') as f:\r\n f.write(training_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dic = read_dic(file_path) Read a JSON file from file_path and return it as a dictionary.
def read_dic(file_path):
    try:
        data = json.load(open(file_path))
    except IOError:
        print("Cannot find %s" % (file_path))
        sys.exit(1)
    except json.decoder.JSONDecodeError:
        print("Cannot parse %s" % (file_path))
        sys.exit(1)
    return data
[ "def get_json_dict(filepath):\n with open(filepath, encoding=\"utf8\") as infile:\n return json.load(infile)", "def read_json_file(file_path: str) -> Dict:\n with open(file_path, 'r') as file:\n data = file.read()\n return json.loads(data)", "def get_json_dict(json_file_name: str) -> dict:\n with open(json_file_name, 'r') as JSON:\n return json.load(JSON)", "def import_json(file_path: str) -> dict:\n with open(file_path, \"r\", encoding=\"utf8\") as json_file:\n return json.load(json_file)", "def ReadDict( filename ):\r\n\tif not os.path.isfile( filename ): return {}\r\n\treturn eval( open( filename, 'rt' ).read() )", "def load_json(filepath):\n data = dict()\n with open(filepath) as data_file: \n data = json.load(data_file)\n return data", "def load_json(json_file):\n \n with open(json_file, \"r\") as file:\n dictionary = json.load(file)\n return dictionary", "def read_json_file(path_):\n with open(path_, \"r\") as f:\n return json.loads(f.read(), object_pairs_hook=OrderedDict)", "def read_json_files():\n\n jsons = dict()\n with open('json_files/config.json') as file:\n data_conf = json.load(file)\n jsons['base_url'] = data_conf['base_url']\n jsons['implicit_wait'] = data_conf['implicit_wait']\n jsons['os'] = data_conf['os']\n jsons['is_headless'] = (data_conf['headless'] == 'True')\n\n with open('json_files/state.json') as file:\n data_states = json.load(file)\n jsons['list_states'] = data_states['states']\n\n with open('json_files/district.json') as file:\n jsons['dict_districts'] = json.load(file)\n\n with open('json_files/sub_district.json') as file:\n jsons['dict_sub_districts'] = json.load(file)\n\n with open('json_files/gram_panchayat.json') as file:\n jsons['dict_gram_panchayats'] = json.load(file)\n\n with open('json_files/village.json') as file:\n jsons['dict_villages'] = json.load(file)\n\n return jsons", "def open_json(path: str, mode: str = 'r') -> dict:\n data = {}\n with open(path, mode) as json_file:\n data = json.loads(json_file.read())\n return data", "def _read_json(self,fname):\n\n with open(fname) as f:\n data = json.load(f)\n\n return data", "def open_json(self, filename: str) -> dict | None:\n json_path = os.path.join(self.directory, filename)\n try:\n with open(json_path, \"r\") as json_file:\n return json.load(json_file)\n except FileNotFoundError:\n print(f\"Couldn't find {filename}. 
(path: {json_path}) file.\")\n return None", "def _load_json_from_path(json_path: str) -> Dict:\n with open(json_path, \"r\", encoding='utf-8') as json_file:\n if os.stat(json_path).st_size != 0: # If the file is not empty:\n return json.load(json_file)", "def get_json_dict(json_file: str):\n with open(json_file, 'r', encoding='utf-8') as file:\n data = json.load(file)\n if type(data) is list:\n return data[0]\n else:\n return data", "def read_optimization_json(filepath):\n\n with open(filepath) as file:\n opt_dict = json.load(file)\n\n return opt_dict", "def read_json_file(var_path, filename):\n vars_fh = open(filename, 'rb')\n json_vars = json.load(vars_fh)\n if not isinstance(json_vars, dict):\n raise Exception(\"JSON file needs to be a dictionary\")\n\n vars_dict = {}\n for (k, v) in json_vars.iteritems():\n vars_dict[\"{}_{}\".format(var_path, k)] = v\n return vars_dict", "def _read_stats_file(path):\n with open(path, \"r\") as f:\n dct = json.load(f)\n dct = {int(k): v for k, v in dct.items()}\n return dct", "def jsonread(filename): \n res = None", "def load_dictionary(self):\n with open(self.default_dict_path) as file_object:\n self.dictionary = json.load(file_object)", "def load_data(filepath):\n with open(filepath, \"r\") as input_file:\n json_data = json.load(input_file)\n return json_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write_dic(dic, file_path) Write a dictionary to a file in JSON format.
def write_dic(dic, file_path):
    try:
        json.dump(dic, open(file_path, 'w'))
    except IOError:
        print("Cannot write to %s " % (file_path))
        sys.exit(1)
[ "def save_dictionary(worddict, wordcount, loc):\n with open(loc, 'w') as f:\n pkl.dump(worddict, f)\n pkl.dump(wordcount, f)", "def WriteDict( d, filename, *fields ):\r\n\tif len( fields ): d = dict( ( k, v ) for k, v in d.items() if k in fields )\r\n\tfile = open( MakeWayFor( filename ), 'wt' )\r\n\tfile.write( '{\\n' )\r\n\tfor k, v in sorted( d.items() ): file.write( '\\t%s : %s,\\n' % ( repr( k ), repr( v ) ) )\r\n\tfile.write( '}\\n' )\r\n\tfile.close()", "def write_db_dictionary(path, dictionary: dict):\n # Write to sqlite\n with SqliteDict(path + '.sqlite') as d:\n for key, value in dictionary.items():\n d[key] = value\n\n # Now also dump the pickled version of the dictionary itself\n with open(path + '.pickle', 'wb') as f:\n pickle.dump(dictionary, f)", "def saveToFile(dict):\n f = codecs.open(database_path, \"w\", \"utf-8\")\n f.write(str(dict))\n f.close()", "def write_dict(fp, dict_name, attributes):\n uid_entry = (\n \"('{UID Name}', '{UID Type}', '{UID Info}', '{Retired}', \" \"'{UID Keyword}')\"\n )\n entry_format = \"'{UID Value}': %s\" % (uid_entry)\n\n fp.write(f\"\\n{dict_name} = {{\\n \")\n fp.write(\", # noqa\\n \".join(entry_format.format(**attr) for attr in attributes))\n fp.write(\" # noqa\\n}\\n\")", "def write_to_json_file(file_path, dict):\n directory = os.path.dirname(file_path)\n os.makedirs(directory, exist_ok=True)\n json_obj = json.dumps(dict)\n fout = open(file_path, \"w\")\n fout.write(json_obj)\n fout.close()", "def save_dictionary(dictionary, path_for_dictionary_dir, overwrite = False):\n\n\n if path_for_dictionary_dir[-5:] == '.json':\n\n with open(os.path.join(path_for_dictionary_dir), 'w') as temp_file:\n json_dict = json.dumps(dictionary, indent = 4, sort_keys = True)\n temp_file.write(json_dict)\n\n else:\n\n if os.path.exists(path_for_dictionary_dir) == True:\n\n if overwrite == False:\n\n raise NameError('Error: Dictionary Directory Already Exists at this path')\n\n else:\n\n os.makedirs(path_for_dictionary_dir)\n\n\n\n for temp_key in dictionary.keys():\n\n\n if type(dictionary[temp_key]) == dict:\n\n to_overwrite = overwrite\n save_dictionary(dictionary[temp_key], os.path.join(path_for_dictionary_dir, temp_key), overwrite = to_overwrite)\n\n else:\n\n\n if path_for_dictionary_dir[-3:] == 'txt':\n\n with open(os.path.join(path_for_dictionary_dir, temp_key), 'w') as temp_file:\n temp_file.write(str(dictionary[temp_key]))\n\n else:\n\n np.save(os.path.join(path_for_dictionary_dir, temp_key), dictionary[temp_key])\n\n\n return", "def dumpdict(dic, filename):\n\n with open(filename,mode=\"w\", encoding=\"utf-8\") as file:\n yaml.dump(dic, file)", "def write_cache(cache_file, cache_dict):\n CACHE_FNAME = cache_file\n dumped = json.dumps(cache_dict)\n fw = open(CACHE_FNAME,\"w\")\n fw.write(dumped)\n fw.close() # Close the open file", "def write_dict_as_str(filename, mydictionary, security='low'):\n \n with open(filename, 'w') as f:\n print(mydictionary, file=f)", "def write_dict_to_text(f_name, python_dict):\n with open(f_name, 'w') as f:\n for key, value in python_dict.items():\n f.write('%s, %s\\n' % (key, value))", "def pickle_save_dict(f, d):\n import pickle\n pickle.dump( d, open( f, 'wb' ) )", "def save_rings_map(ring_dic,filename):\n\n ring_map = OrderedDict(sorted(ring_dic.iteritems(), key = lambda key_value : int(10*float(key_value[0]))))\n\n f = open(filename,'w')\n f.write('{0:>8}\\n'.format(len(ring_map)))\n ic = 1\n for key,val in ring_map.iteritems():\n f.write('{0}\\n'.format(ic))\n f.write('{0}\\n'.format(len(val)))\n for kw in val:\n 
f.write(' {0}'.format(kw))\n f.write('\\n\\n')\n ic+=1\n f.close()", "def to_file(filename, dicts):\n\n with open(filename, \"w\") as f:\n for order, dictionary in dicts:\n f.write(\"%s \" % order)", "def save_dict(dict_obj, path):\n assert path[-4:] == '.npy', 'Missing the .npy extension!'\n\n np.save(os.path.expanduser(path), dict_obj)", "def __force_writing_new_mapping(filename, mapping_dict):\n with open(filename, 'w+') as f:\n for mod in mapping_dict.keys():\n mapping_string = ' '.join(map(str, mapping_dict[mod]))\n string_fin = '{} {}\\n'.format(mod, mapping_string)\n f.write(string_fin)\n return", "def save_dictionary(self, path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass\n # Extract species from all the entries\n species_dict = {}\n entries = self.entries.values()\n for entry in entries:\n for reactant in entry.item.reactants:\n if reactant.label not in species_dict:\n species_dict[reactant.label] = reactant\n\n for product in entry.item.products:\n if product.label not in species_dict:\n species_dict[product.label] = product\n\n with open(path, 'w') as f:\n for label in species_dict.keys():\n f.write(species_dict[label].molecule[0].to_adjacency_list(label=label, remove_h=False))\n f.write('\\n')", "def save_dictionary_lengths_and_court(out_dict, lengths_and_court_importance):\n dictionary = None\n with open(out_dict, 'rb') as f:\n dictionary = pickle.load(f)\n dictionary_as_map = {}\n\n for entry in dictionary:\n term_info = TermInfo(entry)\n dictionary_as_map[entry.term] = term_info\n\n dir_path = '.'\n write_lengths_and_dictionary(lengths_and_court_importance, dictionary_as_map,\\\n dir_path, out_dict)", "def output_file2(in_dict, directory, binned=None, pairs=None, ordered=None):\n import json\n\n if binned == True:\n json = json.dumps(in_dict)\n filename = directory + '/binned_dict2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved binned_dict to %s' %filename)\n \n elif pairs == True:\n json = json.dumps(in_dict)\n filename = directory + '/pairs_list2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved pairs_list to %s' %filename)\n \n elif ordered == True:\n json = json.dumps(in_dict)\n filename = directory + '/ordered_list2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved ordered_list to %s' %filename)\n\n else:\n json = json.dumps(in_dict)\n filename = directory + '/output.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved dict to \"output.json\"')", "def write_classification_to_file(dir, dic):\r\n with open(os.path.join(dir,\"!prediction.txt\"),'w',encoding=\"utf-8\") as f:\r\n for key in dic:\r\n f.write(key + \" \" + dic[key] + \"\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
data = pickle_load(file_path)

Load data from a pickle dump file
def pickle_load(file_path):
    with open(file_path, 'rb') as file_ptr:
        data = pickle.load(file_ptr)
    return data
[ "def load_pickle_file(file_name):\n data_values = None # Define here to establish scope\n log.info(\"LOAD PICKLE: Open the pickle file\")\n with open(file_name, 'rb') as pickle_file:\n data_values = pickle.load(pickle_file)\n\n log.info(\"LOAD PICKLE: Print the loaded pickle data\")\n pprint.pprint(data_values)", "def pickle_load(path):\n return pickle.load(open(path, \"rb\"))", "def _unpickle(filename):\n file_path = _get_file_path(filename)\n print(\"Loading data: \" + file_path)\n with open(file_path, mode='rb') as file:\n if python_version == \"2\":\n data = pickle.load(file)\n else:\n data = pickle.load(file, encoding=\"bytes\")\n return data", "def poincare_load(filename):\n with open(filename, 'rb') as input:\n data = pickle.load(input)\n return data", "def loadpickle(fname):\r\n return pickle.load(open(fname, 'rb'))", "def load_dump(pathway: str) -> Any:\n\n pathway = dumps_path.joinpath(pathway)\n with open(pathway, 'rb') as f:\n obj = pickle.load(f)\n logging.info(f'Upload from dump: {pathway}')\n return obj", "def loadData(fname):\r\n (grating, params, lines, meta) = pickle.load(open(fname, \"r\"))\r\n return grating, params, lines, meta", "def loadfile(path):\n if not os.path.exists(path):\n return {}\n with open(path, 'r') as fp:\n tagdata = pickle.load(fp)\n return tagdata", "def load_dump(name: str) -> 'LeanLib':\n with open(name, 'rb') as f:\n return pickle.load(f)", "def dump_pickle(data, filepath):\n with open(filepath, \"wb\") as file:\n pickle.dump(data, file)", "def load(self, filename):\n raise NotImplementedError(\"Loading from pickled files is not yet supported.\")", "def load(self,filename):\n\t\ttry:\n\t\t\tf = open(filename,\"r\")\n\t\t\tself.stats = pickle.load(f)\n\t\t\tf.close()\n\t\texcept:\n\t\t\tprint \"Could not open file \",filename\n\t\t\traise", "def pickle(data, path):\n with open(path, \"wb\") as file_handler:\n pl.dump(data, file_handler)", "def load_pickle(filename):\n with open(filename, 'rb') as f:\n return pkl.load(f, encoding='latin1')", "def unpickle(file_path):\n # type: (str) -> dict\n with open(file_path, 'rb') as fo:\n data_dict = pickle.load(fo)\n return data_dict", "def load_obj(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)", "def test_pickle_load():\n for entry in pickle_result:\n pickle.loads(entry)", "def unpack(data):\n\n try:\n return pickle.loads(data)\n except Exception as err:\n logger.error(err)", "def load_auction_p(fname):\n return pickle.load(open(fname, \"rb\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise EC2ResponseError the first n times that the method is called.
def _fail_for_n_calls(self, n, status=400):
    self.num_calls += 1
    if self.num_calls <= n:
        e = EC2ResponseError(status, None)
        e.error_code = 'InvalidInstanceID.NotFound'
        raise e
[ "def test_five_failures(self):\n function = aws_service.retry_boto(\n self._fail_for_n_calls,\n r'InvalidInstanceID\\.NotFound',\n initial_sleep_seconds=0.0\n )\n function(5)", "def test_503(self):\n function = aws_service.retry_boto(\n self._fail_for_n_calls, initial_sleep_seconds=0.0)\n function(5, status=503)", "def _raise_exception_for_repeated_timeouts():\n _failure_msg = \"The script was unable to complete successfully after five consecutive API timeouts. \" + \\\n \"Please run the script again or contact Khoros Support for further assistance.\"\n logger.error(_failure_msg)\n raise errors.exceptions.APIConnectionError(_failure_msg)", "def _retry_get(self, uri):\r\n for i in six.moves.range(DEFAULT_RETRY):\r\n resp, body = self.api.method_get(uri)\r\n if body:\r\n return resp, body\r\n # Tried too many times\r\n raise exc.ServiceResponseFailure(\"The Cloud DNS service failed to \"\r\n \"respond to the request.\")", "def test_call_raises_http_error_after_max_retries_when_status_code_in_retry_list(self):\n max_retries = 3\n self.make_retry_call_with_error_code(503, max_retries=max_retries)\n # Check that the request call was made max_retries + 1 times. The +1 is\n # to account for the initial request call.\n self.assertEqual(max_retries + 1, self.session.request.call_count,\n \"Call should have been made 'max_retries' + 1 times\")", "def test_call_raises_http_error_immediately_when_status_code_not_in_retry_list(self):\n self.make_retry_call_with_error_code(404, max_retries=3)\n self.assertEqual(1, self.session.request.call_count,\n \"Request call should have been made only once\")", "def _handle_response_error(self, response, retries, **kwargs):\n return response", "def _batch_response(self, request_id, response, exception):\n\n if exception is not None:\n logging.error(exception)\n logging.error('API Request Error! ' + str(response))", "def test_ssl_error(self):\n\n def raise_ssl_error():\n self.num_calls += 1\n if self.num_calls <= 5:\n raise ssl.SSLError('Test')\n\n aws_service.retry_boto(raise_ssl_error, initial_sleep_seconds=0.0)()", "def test_n_retry(self):\n r = retrying.retry(stop_max_attempt_number=10)(fail_n(9))\n\n fake_time = FakeTime()\n with fake_time:\n r()\n self.assertEqual(fake_time.mock_sleep.calls, 9)", "def update_error():\n requests[\"error\"] += 1", "def _retry(self, request, *args, **kwargs):\n for attempts_left in range(self.max_retries + 1, -1, -1):\n try:\n result = request(*args, **kwargs)\n except requests.HTTPError as e:\n if e.response.status_code >= 500 and attempts_left > 0:\n logging.info(\n 'Server error ({} attempts left). Timeouts and retries '\n 'in {}.'.format(attempts_left, self.retry_timeout))\n time.sleep(self.retry_timeout)\n else:\n raise\n else:\n break\n\n return result", "def increment_content_failures(self, count: int = 1):", "def _retry_5(fun):\n return _retry(fun, n=5, sleep_s=1)", "def test_user_cannot_report_multiple_times(self):\n response = self.report_multiple_times()\n message = 'You cannot report this article multiple times'\n self.base_report(message, response)\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def _callback_on_error(self):\n try:\n yield\n except:\n self._error_count += 1\n self._do_callback()\n raise", "def test(self, failure_rate, iteration_n):\n pass", "def get_next_responses(self, n):\n return # osid.assessment.Response", "def _retry_3(fun):\n return _retry(fun, n=3, sleep_s=1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that we handle failing 5 times and succeeding the 6th time.
def test_five_failures(self):
    function = aws_service.retry_boto(
        self._fail_for_n_calls,
        r'InvalidInstanceID\.NotFound',
        initial_sleep_seconds=0.0
    )
    function(5)
[ "def test(self, failure_rate, iteration_n):\n pass", "def test_runUntilFailure(self):\n stream = StringIO()\n case = erroneous.EventuallyFailingTestCase(\"test_it\")\n runner = self.getRunner(stream=stream)\n d = runner.runAsync(case, untilFailure=True)\n result = self.successResultOf(d)\n # The case is hard-coded to fail on its 5th run.\n self.assertEqual(5, case.n)\n self.assertFalse(result.wasSuccessful())\n output = stream.getvalue()\n\n # It passes each time except the last.\n self.assertEqual(\n output.count(\"PASSED\"),\n case.n - 1,\n \"expected to see PASSED in output\",\n )\n # It also fails at the end.\n self.assertIn(\"FAIL\", output)\n\n # It also reports its progress.\n for i in range(1, 6):\n self.assertIn(f\"Test Pass {i}\", output)\n\n # It also reports the number of tests run as part of each iteration.\n self.assertEqual(\n output.count(\"Ran 1 tests in\"),\n case.n,\n \"expected to see per-iteration test count in output\",\n )", "def test_n_retry(self):\n r = retrying.retry(stop_max_attempt_number=10)(fail_n(9))\n\n fake_time = FakeTime()\n with fake_time:\n r()\n self.assertEqual(fake_time.mock_sleep.calls, 9)", "def test_batch_exit_code(self):\n cmd = self.run_salt(\n ' \"*\" state.single test.fail_without_changes name=test_me -b 25%',\n with_retcode=True,\n timeout=self.run_timeout,\n )\n self.assertEqual(cmd[-1], 2)", "def test_retry_success(self):\n @utils.retry(ExceptionToCheck=BaseException, tries=3, backoff=0,\n delay=1)\n def _success():\n \"\"\"Return True after retry.\"\"\"\n self.count += 1\n if self.count == 3:\n return True\n else:\n raise BaseException\n\n self.count = 0\n self.assertEquals(_success(), True)", "def test_attempts_exhausted_true(sudoku_board):\n s = sudoku_board\n s._attempts_so_far = 5\n s._max_attempts = 6\n\n assert not s.attempts_exhausted", "def test_fallback_channel_delivery_failure_less_than_5(self):\n event = Event.objects.create()\n event.fallback_channel = True\n event.status = Event.FAILED\n event.recipient_id = \"27820001001\"\n event.timestamp = timezone.now() + timedelta(days=2)\n event.save()\n\n with patch(\"eventstore.tasks.rapidpro\") as p:\n handle_event(event)\n\n p.create_flow_start.assert_not_called()\n df = DeliveryFailure.objects.get(contact_id=\"27820001001\")\n self.assertEqual(df.number_of_failures, 1)", "def testFastbootError(self, mock_subp_check_output):\n with validUnlockCredsZip() as zip:\n for n in range(5):\n mock_subp_check_output.reset_mock()\n mock_subp_check_output.side_effect = makeFastbootCommandFake(\n self, error_on_command_number=n)\n self.assertNotEqual(main([zip.name]), 0)\n self.assertNotEqual(mock_subp_check_output.call_count, 0)", "def _retry_5(fun):\n return _retry(fun, n=5, sleep_s=1)", "def test_six_pairs_by_rng(self):\n self.check_for_six_pairs = check_for_six_pairs_instant_win(self.list_of_players)\n self.assertFalse(self.check_for_six_pairs, \"Instant win by 6 pairs!!!\")", "def test_4():\n assert multiples_of_3_and_5(8456) == 16687353", "def test_sequence_failure(self):\n\n success1 = Stub(Result.Success)\n success2 = Stub(Result.Success)\n continue3 = Stub(Result.Continue)\n failure4 = Stub(Result.Failure)\n failure5 = Stub(Result.Failure)\n\n sequence = Sequence(\"Failing Sequence\")\n sequence.append(success1)\n sequence.append(success2)\n sequence.append(continue3)\n sequence.append(failure4)\n sequence.append(failure5)\n\n sequence.run(None)\n continue3.return_value = Result.Success\n result = sequence.run(None)\n\n self.assertEqual(Result.Failure, result)\n self.assertEqual(1, 
success1.calls)\n self.assertEqual(1, success2.calls)\n self.assertEqual(2, continue3.calls)\n self.assertEqual(1, failure4.calls)\n self.assertEqual(0, failure5.calls)", "def test_5():\n assert multiples_of_3_and_5(19564) == 89301183", "def three_hundred_sadness():\n TraceStats.failures += 1\n\n if TraceStats.failures >= 10:\n print \"\\n\\n\"\n print colored(\"\\tHas been detected ten time a complete Traceroute failure\", \"red\")\n print colored(\"\\tMaybe the network is down, maybe your host is filtering ICMP\", \"red\")\n print colored(\"\\tIn both cases, the test is interrupted.\", \"red\")\n print \"\\n\\n\"\n quit(-1)", "def last_test_passing(last_runned,ret):\n # See https://stackoverflow.com/questions/39945858/cmake-testing-causing-error-when-tests-fail\n#enum {\n# UPDATE_ERRORS = 0x01,\n# CONFIGURE_ERRORS = 0x02,\n# BUILD_ERRORS = 0x04,\n# TEST_ERRORS = 0x08,\n# MEMORY_ERRORS = 0x10,\n# COVERAGE_ERRORS = 0x20,\n# SUBMIT_ERRORS = 0x40\n#};\n if not(ret==0 or ret & 0x08 or ret & 0x10 or ret & 0x20 or ret & 0x40):# We try to also handle the case where CTest does not respect the enum and crash or whatever)\n my_print(\"Lazy test wont mark any target because of this ctest exit status:\",ret)\n return [] # Nothing could have passed.\n\n try:\n with open(\"Testing/Temporary/LastTestsFailed.log\") as f:\n wholeFile= f.read()\n failing = re.findall(r'^\\d:(.*)\\S*$', wholeFile)\n except FileNotFoundError:# Ninja dont generate if no fail\n failing=[]\n\n return [ x for x in last_runned if x not in failing]", "def test_3():\n assert multiples_of_3_and_5(1000) == 233168", "def test_login_attempt_too_many_recent_failures(app):\n username = 'foo'\n remote_addr = '127.0.0.1'\n app.config['XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS'] = 1\n app.config['XL_AUTH_FAILED_LOGIN_TIMEFRAME'] = 5 * 60\n\n assert FailedLoginAttempt.too_many_recent_failures_for(username) is False\n\n login_attempt = FailedLoginAttempt(username, remote_addr)\n login_attempt.save()\n\n assert FailedLoginAttempt.too_many_recent_failures_for(username) is True\n\n login_attempt.created_at = datetime.utcnow() - timedelta(seconds=10 * 60)\n login_attempt.save()\n\n assert FailedLoginAttempt.too_many_recent_failures_for(username) is False", "def testMaxGuesses(self):\n # ------------------------------\n # Try to access non-existent link (404).\n # ------------------------------\n url = self.get_url('/test/abcd12345678')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 404)\n\n # ------------------------------\n # No more guesses should be allowed (403).\n # ------------------------------\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 403)\n\n # ------------------------------\n # Not even correct guesses (403).\n # ------------------------------\n response = self._RunAsync(self.http_client.fetch, self._url, method='GET')\n self.assertEqual(response.code, 403)\n\n # ------------------------------\n # But do allow guesses on other group ids.\n # ------------------------------\n url = self.get_url('/test/another12345678')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 404)\n\n # ------------------------------\n # Now \"wait\" 24 hours and make sure another guess is allowed.\n # ------------------------------\n util._TEST_TIME += constants.SECONDS_PER_DAY\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 404)", "def 
test_wrong_number_of_setup_items(self):\n msg = '#aaa 99999\\n' # Three items required, two given\n for byte in msg[:-1]:\n self.assertTrue(self.system.parse(byte))\n with self.assertRaisesRegex(ValueError, 'must have three items'):\n self.system.parse(msg[-1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that we retry when AWS returns a 503 status.
def test_503(self):
    function = aws_service.retry_boto(
        self._fail_for_n_calls, initial_sleep_seconds=0.0)
    function(5, status=503)
[ "def test_retry_request_behavior(mocker):\n failed_query = make_mock_failing_query(503)\n mocker.patch.object(LHorizon, \"query\", failed_query)\n futile_lhorizon = [LHorizon()]\n start = time.time()\n try:\n query_all_lhorizons(futile_lhorizon, delay_retry=0.1, max_retries=2)\n except TimeoutError:\n assert time.time() - start > 0.3\n return\n\n raise ValueError(\"did not correctly halt on multiple retries\")", "def test_ssl_error(self):\n\n def raise_ssl_error():\n self.num_calls += 1\n if self.num_calls <= 5:\n raise ssl.SSLError('Test')\n\n aws_service.retry_boto(raise_ssl_error, initial_sleep_seconds=0.0)()", "def retry_503(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except HttpError as e:\n log.error(\"HTTP Error calling Google Analytics: %s\", e)\n\n if e.resp.status == 503:\n return f(*args, **kwargs)\n\n return wrapper", "def test_call_raises_http_error_after_max_retries_when_status_code_in_retry_list(self):\n max_retries = 3\n self.make_retry_call_with_error_code(503, max_retries=max_retries)\n # Check that the request call was made max_retries + 1 times. The +1 is\n # to account for the initial request call.\n self.assertEqual(max_retries + 1, self.session.request.call_count,\n \"Call should have been made 'max_retries' + 1 times\")", "def test_retry_after(silver_client):\n response = make_requests(silver_client)\n\n assert response.status_code == 429\n\n assert \"retry-after\" in response.headers\n\n wait_until_retry_after(response)\n\n assert_limit_works(silver_client, limit=10)", "def _http_service_unavailable(start_response, delta):\n response_headers = [\n ('Content-Length', '0'),\n ('Retry-After', delta or str(get_conf().retry_after))\n ]\n start_response('503 Service Unavailable', response_headers)\n return []", "def test_five_failures(self):\n function = aws_service.retry_boto(\n self._fail_for_n_calls,\n r'InvalidInstanceID\\.NotFound',\n initial_sleep_seconds=0.0\n )\n function(5)", "def raiseRetryException():\n resp = adpt.Response('reqmethod', 'reqpath', c.HTTPStatus.ETAG_MISMATCH,\n 'reason', 'headers')\n http_exc = pvm_exc.HttpError(resp)\n raise http_exc", "def test_call_raises_http_error_immediately_when_status_code_not_in_retry_list(self):\n self.make_retry_call_with_error_code(404, max_retries=3)\n self.assertEqual(1, self.session.request.call_count,\n \"Request call should have been made only once\")", "def test_get_error_with_retry(mock_get):\n mock_get.return_value.content = get_mock_response()\n mock_get.return_value.status_code = 504\n err_response = Response()\n err_response.status_code = 504\n mock_get.return_value.raise_for_status.side_effect = requests.exceptions.HTTPError(response=err_response)\n token = 'sdksdk203afdsfj_sadasd3939'\n client = Apptuit(sanitize_mode=None, token=token)\n query = \"fetch('nyc.taxi.rides')\"\n start = 1406831400\n end = 1407609000\n with assert_raises(ApptuitException):\n client.query(query, start, end, retry_count=1)\n assert_equals(2, mock_get.call_count)", "def handle_first_503(self, http_err, retry_count, task_row_data): # pylint: disable=unused-argument\n fn_name = \"handle_first_503(): \"\n if retry_count == settings.NUM_API_TRIES-1 and get_http_error_status(http_err) == 503:\n # Log first 503 as an Info level, because\n # (a) There are a frequent 503 errors\n # (b) Almost all 503 errors recover after a single retry\n logging.info(\"%sFirst 503 HTTP error, so may not be an error (yet). 
%s attempt remaining\",\n fn_name, retry_count)\n logservice.flush()\n return True, False\n\n return False, False", "def retry_on_error_requests(exc):\n return _retry_on_error(exc, \"requests\")", "def test_service_unavailable_result(self):\n process_result = process_response(self.resp_service_unavailable)\n self.assertEqual(process_result[\"result\"], 4)", "def test_is_healthy_bad_route():\n client = meilisearch.Client(\"http://wrongurl:1234\", timeout=1)\n response = client.is_healthy()\n assert response is False", "def test_basic_get_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.get(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None", "def tests_request_zeep_error(self):\n\n serialize_responses = [ZeepFault(\"test\") for x in range(MAX_RETRY_ATTEMPTS)]\n with patch(\"zeep.Client\"), patch(\"time.sleep\", autospec=True), patch(\n \"zeep.helpers.serialize_object\", side_effect=serialize_responses\n ) as mock_request, pytest.raises(ServiceUnavailable):\n client = TotalConnectClient(\n \"username\", \"password\", usercodes=None, retry_delay=0\n )\n assert mock_request.call_count == MAX_RETRY_ATTEMPTS\n assert client.is_logged_in() is False", "def _raise_exception_for_repeated_timeouts():\n _failure_msg = \"The script was unable to complete successfully after five consecutive API timeouts. \" + \\\n \"Please run the script again or contact Khoros Support for further assistance.\"\n logger.error(_failure_msg)\n raise errors.exceptions.APIConnectionError(_failure_msg)", "def test_basic_patch_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.patch(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None", "def _retry(self, request, *args, **kwargs):\n for attempts_left in range(self.max_retries + 1, -1, -1):\n try:\n result = request(*args, **kwargs)\n except requests.HTTPError as e:\n if e.response.status_code >= 500 and attempts_left > 0:\n logging.info(\n 'Server error ({} attempts left). Timeouts and retries '\n 'in {}.'.format(attempts_left, self.retry_timeout))\n time.sleep(self.retry_timeout)\n else:\n raise\n else:\n break\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that we retry on ssl.SSLError. This is a case that was seen in the field.
def test_ssl_error(self):

    def raise_ssl_error():
        self.num_calls += 1
        if self.num_calls <= 5:
            raise ssl.SSLError('Test')

    aws_service.retry_boto(raise_ssl_error, initial_sleep_seconds=0.0)()
[ "def test_http_ssl_error(mock_base_http_request, client):\n # Configure\n mock_base_http_request.side_effect = DemistoException('SSLError')\n # Execute\n with pytest.raises(SSLError) as e:\n client.http_request('GET', MOCK_TEST_URL_SUFFIX)\n\n # Assert\n assert (\n str(e.value)\n == \"SSL Certificate Verification Failed - try selecting 'Trust any certificate' checkbox \"\n 'in the integration configuration.'\n )", "def test_doesNotSwallowOtherSSLErrors(self):\n def raiser(_):\n # Unfortunately, there seems to be no way to trigger a real SSL\n # error artificially.\n raise SSL.Error([['', '', '']])\n ctx = FakeContext(SSL.SSLv23_METHOD)\n ctx.set_cipher_list = raiser\n self.patch(sslverify.SSL, 'Context', lambda _: ctx)\n self.assertRaises(\n SSL.Error,\n sslverify._expandCipherString, u'ALL', SSL.SSLv23_METHOD, 0\n )", "def _handle_ssl_exception(self, err):\n if err.args[0] == ssl.SSL_ERROR_WANT_READ:\n logger.debug(\"SSL client {0} want read\".format(self._address))\n return False\n elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n logger.debug(\"SSL client {0} want write\".format(self._address))\n self._write_watcher.start()\n return False\n elif err.args[0] == ssl.SSL_ERROR_EOF:\n self.stop(msg=\"SSL EOF for peer {0}, connection closed\")\n return False\n else:\n return True", "def test_https_expired(self):\n domain = inspect(\"expired.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_expired_cert)", "def test_failedCertificateVerification(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=False,\n requireCertificate=False),\n sslverify.OpenSSLCertificateOptions(verify=True,\n requireCertificate=False, caCerts=[self.cCert]),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n\n return d.addCallback(afterLost)", "def test_retry_request_behavior(mocker):\n failed_query = make_mock_failing_query(503)\n mocker.patch.object(LHorizon, \"query\", failed_query)\n futile_lhorizon = [LHorizon()]\n start = time.time()\n try:\n query_all_lhorizons(futile_lhorizon, delay_retry=0.1, max_retries=2)\n except TimeoutError:\n assert time.time() - start > 0.3\n return\n\n raise ValueError(\"did not correctly halt on multiple retries\")", "def test_https_bad_chain(self):\n domain = inspect(\"untrusted-root.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_chain)", "def ssl_check():\n return \"All ok, mm'kay.\"", "def sslbesessionmultiplexattemptfailsrate(self) :\n try :\n return self._sslbesessionmultiplexattemptfailsrate\n except Exception as e:\n raise e", "def test_https_bad_hostname(self):\n domain = inspect(\"wrong.host.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_hostname)", "def retry_on_error_requests(exc):\n return _retry_on_error(exc, \"requests\")", "def test_validHostnameInvalidCertificate(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n 
self.assertIsInstance(sErr, SSL.Error)", "def set_retry(self, re):\n _ldns.ldns_resolver_set_retry(self,re)\n #parameters: ldns_resolver *,uint8_t,\n #retvals: ", "def as_retryable(error):\n error.retryable = True\n\n return error", "def test_basic_patch_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.patch(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None", "def test_basic_get_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.get(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None", "def test_refusedAnonymousClientConnection(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=True,\n caCerts=[self.sCert], requireCertificate=True),\n sslverify.OpenSSLCertificateOptions(\n requireCertificate=False),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n\n\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n # Win32 fails to report the SSL Error, and report a connection lost\n # instead: there is a race condition so that's not totally\n # surprising (see ticket #2877 in the tracker)\n self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost))\n self.assertIsInstance(sResult.value, SSL.Error)\n\n return d.addCallback(afterLost)", "def test_call_raises_http_error_immediately_when_status_code_not_in_retry_list(self):\n self.make_retry_call_with_error_code(404, max_retries=3)\n self.assertEqual(1, self.session.request.call_count,\n \"Request call should have been made only once\")", "def fatal_request_error(err=None):\n if not err or not err.response:\n return False\n if err.response.status_code == 403:\n # download url needs to be refreshed, give up on backoff\n return True\n # retry 4xx or 5xx and all else not\n return not 400 <= err.response.status_code <= 600" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test waiting for an instance to terminate.
def test_wait_for_instance_terminated(self):
    aws_svc, encryptor_image, guest_image = build_aws_service()
    instance = aws_svc.run_instance(guest_image.id)
    aws_svc.terminate_instance(instance.id)
    result = encrypt_ami.wait_for_instance(
        aws_svc, instance.id, state='terminated', timeout=100)
    self.assertEquals(instance, result)
[ "def test_wait_for_instance_unexpectedly_terminated(self):\n aws_svc, encryptor_image, guest_image = build_aws_service()\n instance = aws_svc.run_instance(guest_image.id)\n aws_svc.terminate_instance(instance.id)\n try:\n encrypt_ami.wait_for_instance(\n aws_svc, instance.id, state='running', timeout=100)\n except encrypt_ami.InstanceError as e:\n self.assertTrue('unexpectedly terminated' in e.message)", "def test_stop_timeout_ok(self):\n ok(start(WaitingService()).stop(block=1))", "def wait_for_instances_to_stop(conn, instance_ids, pending_ids):\n reservations = conn.get_all_instances(instance_ids=pending_ids)\n for reservation in reservations:\n for instance in reservation.instances:\n print \"State: \" + instance.state\n if instance.state == 'terminated':\n print \"instance `{\" + instance.id + \"}` terminated!\"\n pending_ids.pop(pending_ids.index(instance.id))\n else:\n print \"instance `{\" + instance.id + \"}` stopping...\"\n if len(pending_ids) == 0:\n print \"all instances terminated!\"\n else:\n time.sleep(10)\n wait_for_instances_to_stop(conn, instance_ids, pending_ids)", "def test_stop(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = True\n self._hyp.login()\n self._hyp.stop(guest_name, parameters_stop)\n self._mock_virsh.return_value.shutdown.assert_called_with(\n guest_name, timeout=mock.ANY)", "def test_kill_event() -> None:\n with FailureNotifier(\"test\", verbose=0, debug=True):\n print(\"Sleeping...\")\n time.sleep(30.0)\n print(\"Done Sleeping, you were too late!\")\n raise ValueError(\"Fuck\")", "async def wait_until_stop(self):\n await self._driver.wait_until_stop()", "def test_stop(self):\n \n # Do the same things as in test_with_block, but stop the VirtualPump\n # manually.\n time.sleep(0.2)\n original_count = threading.active_count()\n \n vp = VirtualPump()\n self.assertEqual(threading.active_count(), original_count + 2)\n \n vp.stop()\n time.sleep(0.2)\n self.assertEqual(threading.active_count(), original_count)", "def test_kill_not_running(self):\n TimedService().kill()", "def _wait_for_instance_running_state(self):\n\n assert self._instance\n\n tries = 0\n start_time = time.time()\n while True:\n try:\n tries += 1\n msg = 'Waiting for instance to run, tries=%s.' 
% (tries,)\n log.info(msg)\n self._store_message(msg)\n self._instance.update()\n if self._instance.state == 'running':\n break\n except Exception, e:\n msg = 'ERROR %s: %s' % (type(e), e)\n log.exception(msg)\n self._store_message(msg, 'error')\n\n if (self._running_state_check_timeout and\n time.time() - start_time >\n self._running_state_check_timeout):\n msg = 'Gave up trying to wait for EC2 instance to run.'\n log.error(msg)\n self._store_message(msg, 'error')\n break\n time.sleep(0.1)", "def test_stop_not_running(self):\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = False\n self._hyp.login()\n self._mock_virsh.return_value.shutdown.assert_not_called()", "def test_poll_until_timeout(self):\n self.sg.group_id = 'abc'\n self.sg.get_scaling_group_state = self.get_scaling_group_state_timeout\n\n d = self.sg.wait_for_state(None, HasActive(2), clock=self.clock)\n for _ in range(59):\n self.clock.advance(10)\n self.assertNoResult(d)\n self.clock.advance(10)\n self.failureResultOf(d, TimedOutError)", "def test_stop_timeout_fail(self):\n ok(not start(ForeverService()).stop(block=1))", "def test_terminate(self):\n self.terminated = False\n\n def fakeTerminateProcess():\n self.terminated = True\n\n clock = task.Clock()\n pp = EchoProcessProtocol(clock)\n pp.terminateProcess = fakeTerminateProcess\n transport = proto_helpers.StringTransport()\n pp.makeConnection(transport)\n\n self.assertFalse(self.terminated)\n clock.advance(10)\n self.assertTrue(self.terminated)", "def test_halt_and_wait(self, debug_session):\n debug_session.connect()\n\n debug_session.halt(wait=True)", "def test_stop_not_running(self):\n TimedService().stop()", "def _wait_instance_boot(self):\n if not self.ALLOW_PORTS or _utl.check_port(self.host_ip, 80):\n # Avoid to show message if already booted or not\n return\n\n _get_logger().info(\"Waiting instance boot...\")\n _sleep(self._TIMEOUT_SLEEP)\n if not _utl.check_port(self.host_ip, 80, timeout=self.TIMEOUT,\n sleep=self._TIMEOUT_SLEEP):\n raise _exc.HostRuntimeException(gen_msg=('timeout', \"boot\"))", "def test_stop_run(self):\n pass", "def exit_during_step_inst_test(self):\n self.exit_during_step_base(\"thread step-inst -m all-threads\", 'stop reason = instruction step')", "def waitForExitNotification(timeout):\n\tntf = _thread.wait(timeout)\n\tif ntf == _thread.EXIT:\n\t\treturn True\n\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that we raise an exception when an instance goes into an error state while we're waiting for it.
def test_instance_error_state(self):
    aws_svc, encryptor_image, guest_image = build_aws_service()
    instance = aws_svc.run_instance(guest_image.id)
    instance._state.name = 'error'
    try:
        encrypt_ami.wait_for_instance(aws_svc, instance.id, timeout=100)
    except encrypt_ami.InstanceError as e:
        self.assertTrue('error state' in e.message)
[ "def testImmediateException(self):\n def _OnException(type, value, tb):\n self.stop()\n\n with util.ExceptionBarrier(_OnException):\n raise Exception('an error')\n self.wait()", "def test_block_on_lease_error(self, fake_sleep):\n fake_lease = MagicMock()\n fake_lease.state = virtual_machine.vim.HttpNfcLease.State.error\n\n with self.assertRaises(RuntimeError):\n virtual_machine._block_on_lease(fake_lease)", "def test_cant_call_while_running():\n\n def rerun(instance):\n \"\"\"\n call the transition again\n \"\"\"\n instance.do_thing(None)\n\n x = get_thing()\n\n with pytest.raises(TransitionNotAllowed):\n x.do_thing(rerun)\n\n # ensure the target transition is set when the process is done\n assert x.state == x.CHOICES.error", "def testUnhandledExeption(self):\n success = [False]\n\n def _Op(cb):\n raise ZeroDivisionError('exception')\n\n def _OnSuccess():\n success[0] = True\n\n def _RunBarrier():\n with util.Barrier(_OnSuccess) as b:\n _Op(b.Callback())\n\n self.assertRaises(ZeroDivisionError, _RunBarrier)\n self.assertTrue(not success[0])", "def test_block_on_lease_never_ready(self, fake_sleep):\n fake_lease = MagicMock()\n fake_lease.state = 'foo'\n\n with self.assertRaises(RuntimeError):\n virtual_machine._block_on_lease(fake_lease)", "def test_exception_during_action(self, event, states):\n ae = AE()\n ae.add_requested_context(Verification)\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n\n assoc = Association(ae, mode=\"requestor\")\n fsm = assoc.dul.state_machine\n fsm.dul = BadDUL()\n\n for state in states:\n fsm.dul.is_killed = False\n state = \"Sta{}\".format(state)\n fsm.current_state = state\n with pytest.raises(AttributeError):\n fsm.do_action(event)\n assert fsm.dul.is_killed is True\n assert fsm.current_state == state", "def test_wait_for_instance_unexpectedly_terminated(self):\n aws_svc, encryptor_image, guest_image = build_aws_service()\n instance = aws_svc.run_instance(guest_image.id)\n aws_svc.terminate_instance(instance.id)\n try:\n encrypt_ami.wait_for_instance(\n aws_svc, instance.id, state='running', timeout=100)\n except encrypt_ami.InstanceError as e:\n self.assertTrue('unexpectedly terminated' in e.message)", "async def test_invalid_wait_time(opp: OpenPeerPower, mock_account):\n await setup_integration(opp, mock_account, PLATFORM_DOMAIN)\n\n vacuum = opp.states.get(VACUUM_ENTITY_ID)\n assert vacuum\n assert vacuum.state == STATE_DOCKED\n\n with pytest.raises(MultipleInvalid):\n await opp.services.async_call(\n DOMAIN,\n SERVICE_SET_WAIT_TIME,\n {ATTR_ENTITY_ID: VACUUM_ENTITY_ID, \"minutes\": 10},\n blocking=True,\n )\n assert not mock_account.robots[0].set_wait_time.called", "def _raise_thread_errors(self):\n while not self._rx_q.empty():\n item = self._rx_q.get()\n if isinstance(item, Exception):\n raise item\n self.close()", "def test_failure(sched):\n a = sched.add_node('a')\n b = sched.add_node('b')\n b2 = sched.add_node('b2')\n a.depends_on(b)\n a.depends_on(b2)\n\n sched.initialize()\n b.submit()\n stat = sched.status()\n assert stat['submitted'] == [b]\n\n b.failed('timeout')\n assert a.state == 'waiting'\n assert b2.state == 'ready'\n assert b.state == 'ready'\n assert len(b.failures) == 1", "async def test_perform_failure(self):\n boxes = []\n e = Effect(boxes.append)\n future = asyncio_perform(func_dispatcher, e)\n assert not future.done()\n boxes[0].fail((ValueError, ValueError(\"oh dear\"), None))\n with pytest.raises(ValueError):\n await future", "def test_runFailure(self):\n # Give it a broken worker pool factory. 
There's no exception handling\n # for such an error in the implementation..\n class BrokenFactory(Exception):\n pass\n\n def brokenFactory(*args, **kwargs):\n raise BrokenFactory()\n\n runner = self.getRunner(workerPoolFactory=brokenFactory)\n with self.assertRaises(BrokenFactory):\n runner.run(self.suite)", "def test_unlocks_when_exception_raised(self):\n pool = SimplePool()\n try:\n with pool.take() as x:\n with pool.take() as y:\n raise RuntimeError\n except:\n self.assertEqual(2, len(pool.elements))\n for e in pool.elements:\n self.assertFalse(e.claimed)", "def test_exception(self, _print):\n # Since the weak refs are cleaned up lazily, grab strong references to\n # any that are currently alive to prevent our baseline from changing\n # under us.\n refs_at_start = set([ref() for ref in\n actor._tracked_refs_by_idx.values()])\n num_refs_at_start = len(refs_at_start)\n\n # Now do our test: leak a result with an exception attached.\n ar = actor.TrackedAsyncResult(\"foo\")\n ar.set_exception(Exception())\n self.assertEqual(num_refs_at_start + 1, len(actor._tracked_refs_by_idx))\n del ar # Enough to trigger cleanup in CPython, with exact ref counts.\n gc.collect() # For PyPy, we have to force a cleanup\n self._m_exit.assert_called_once_with(1)\n self.assertTrue(_print.called)\n self.assertTrue(\"foo\" in _print.call_args[0][0])\n self._m_exit.reset_mock()\n\n # Re-grab the set of references for comparison\n refs_at_end = set([ref() for ref in\n actor._tracked_refs_by_idx.values()])\n num_refs_at_end = len(refs_at_end)\n self.assertEqual(refs_at_start, refs_at_end,\n \"%s exceptions may have been leaked: %s\" %\n (num_refs_at_end - num_refs_at_start,\n actor._tracked_refs_by_idx))", "def check_and_raise_error(self):\n if not self.has_error:\n return\n logger.debug(\"Thread error caught: %s\", self.errors)\n error = self.errors[0]\n raise error[1].with_traceback(error[2])", "def test_kill_event() -> None:\n with FailureNotifier(\"test\", verbose=0, debug=True):\n print(\"Sleeping...\")\n time.sleep(30.0)\n print(\"Done Sleeping, you were too late!\")\n raise ValueError(\"Fuck\")", "def test_run_node_failed(self):\r\n self.driver = AbiquoNodeDriver('ten', 'shin',\r\n 'http://dummy.host.com/api')\r\n node = self.driver.list_nodes()[0]\r\n # Node is in the correct state, but it fails because of the\r\n # async task and it raises the error.\r\n self.assertRaises(LibcloudError, self.driver.ex_run_node, node)", "def _failed():\n raise BaseException", "def test_bails_out_early_on_error(self):\n pause_calls = []\n\n def maybe_kill(svc):\n if svc == \"haproxy\":\n return False\n else:\n pause_calls.append(svc)\n return True\n\n self.service_pause.side_effect = maybe_kill\n self.assertRaisesRegexp(\n Exception, \"haproxy didn't stop cleanly.\",\n actions.actions.pause, self.args)\n self.assertEqual(pause_calls, [\"swift-proxy\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that we handle the edge case when an instance is terminated on startup.
def test_wait_for_instance_unexpectedly_terminated(self):
    aws_svc, encryptor_image, guest_image = build_aws_service()
    instance = aws_svc.run_instance(guest_image.id)
    aws_svc.terminate_instance(instance.id)
    try:
        encrypt_ami.wait_for_instance(
            aws_svc, instance.id, state='running', timeout=100)
    except encrypt_ami.InstanceError as e:
        self.assertTrue('unexpectedly terminated' in e.message)
[ "def test_wait_for_instance_terminated(self):\n aws_svc, encryptor_image, guest_image = build_aws_service()\n instance = aws_svc.run_instance(guest_image.id)\n aws_svc.terminate_instance(instance.id)\n result = encrypt_ami.wait_for_instance(\n aws_svc, instance.id, state='terminated', timeout=100)\n self.assertEquals(instance, result)", "def test_kill_not_running(self):\n TimedService().kill()", "def test_usage_05_restart_mysql(self):\n tc_name = \"Usage 05 tests\"\n tc_num = 05\n instance_id = self.instance_id_list[0]\n self.client.instances.restart(instance_id)\n self.assertEqual(str(testutil.get_last_response_code(self.client)), \"202\",\n \"Error: Resize instance. Unexpected resp code: %r != %r\"\n % (str(testutil.get_last_response_code(self.client)), \"202\"))\n # check interim status of REBOOT\n testutil.wait_for_status(self.client, instance_id, \"REBOOT\")\n # wait for active, ensure time elapsed, record the duration\n status, elapsed_time = testutil.waitForActive(self.client,\n instanceId=instance_id)\n self.fixture_log.debug(\"Inst: %r is: %r after: %r seconds\" %\n (instance_id, status, elapsed_time))\n running_time = (datetime.utcnow() - self.starttime_list[tc_num]).seconds\n if 10 * self.ONEMIN > running_time:\n time.sleep((10 * self.ONEMIN) - running_time)\n # delete the ACTIVE instance\n if testutil.getInstanceStatus(self.client, instance_id) == \"ACTIVE\":\n self.client.instances.get(instance_id).delete()\n self.instance_id_list.remove(instance_id)\n duration = datetime.utcnow() - self.starttime_list[tc_num]\n rootAction = \"reddwarf.instance.delete\"\n # AH Event Sent - Check AH data data AFTER the DELETE\n time.sleep(self.AHDELAY)\n AHEventsList = self.dbaas_atomhopper_provider.events_by_resourceId(instance_id)\n single_event = [event for event in AHEventsList\n if event.rootAction == rootAction].pop()\n self.assertEqual(single_event.resourceId, instance_id,\n 'AH resourceID:%r != created instanceID:%r'\n % (single_event.resourceId, instance_id))\n testutil.valid_duration(duration, single_event, self.dateFormat)", "def test_stop_not_running(self):\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = False\n self._hyp.login()\n self._mock_virsh.return_value.shutdown.assert_not_called()", "async def test_graceful_shutdown(self):\n await graceful_shutdown(self.mock_application)\n self.mock_db_conn.close.assert_awaited_once()", "def test_stop(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = True\n self._hyp.login()\n self._hyp.stop(guest_name, parameters_stop)\n self._mock_virsh.return_value.shutdown.assert_called_with(\n guest_name, timeout=mock.ANY)", "def _test_hangup(self):\n return", "def test_stop_run(self):\n pass", "def test_reboot_not_running(self):\n guest_name = \"some guest\"\n parameters_stop = {}\n self._mock_virsh.return_value.is_defined.return_value = True\n self._mock_virsh.return_value.is_running.return_value = False\n self._hyp.login()\n self._hyp.reboot(guest_name, parameters_stop)\n self._mock_virsh.return_value.shutdown.assert_not_called()\n self._mock_virsh.return_value.start.assert_called_with(guest_name)", "def test_bails_out_early_on_error(self):\n resume_calls = []\n\n def maybe_kill(svc):\n if svc == \"apache2\":\n return False\n else:\n resume_calls.append(svc)\n return True\n\n self.service_resume.side_effect = maybe_kill\n 
self.assertRaisesRegexp(\n Exception, \"apache2 didn't start cleanly.\",\n actions.actions.resume, self.args)\n self.assertEqual(resume_calls, ['swift-proxy', 'memcached'])", "def test_instance_error_state(self):\n aws_svc, encryptor_image, guest_image = build_aws_service()\n instance = aws_svc.run_instance(guest_image.id)\n instance._state.name = 'error'\n try:\n encrypt_ami.wait_for_instance(aws_svc, instance.id, timeout=100)\n except encrypt_ami.InstanceError as e:\n self.assertTrue('error state' in e.message)", "def test_he_vm_restart(self):\n self.stop_service_and_check_he_vm(service_name=conf.POSTGRESQL_SERVICE)", "def test_post_teardown_ref(self):\n self.teardown()\n assert_raises(RuntimeError, getattr, self, 'instance')", "def test_terminate_only_closes_if_not_terminated(self):\n session = mock.Mock()\n session._terminate_lock = mock.MagicMock()\n session._terminated = True\n Sl4aSession.terminate(session)\n\n self.assertFalse(session._event_dispatcher.close.called)\n self.assertFalse(session.rpc_client.terminate.called)", "def test_onleave_restart(monkeypatch, ms_component: Microservice):\n mock_execl = MagicMock()\n monkeypatch.setattr(os, 'execl', mock_execl)\n ms_component.on_restart()\n\n yield ms_component.fire('leave', ms_component.session, None)\n\n mock_execl.assert_called_once_with(sys.executable, sys.executable,\n *sys.argv)", "def test_deploy_shutdown(self) -> None:\n super().test_deploy_shutdown()", "def test_restart_run(self):\n pass", "def test_bails_out_early_on_error(self):\n pause_calls = []\n\n def maybe_kill(svc):\n if svc == \"haproxy\":\n return False\n else:\n pause_calls.append(svc)\n return True\n\n self.service_pause.side_effect = maybe_kill\n self.assertRaisesRegexp(\n Exception, \"haproxy didn't stop cleanly.\",\n actions.actions.pause, self.args)\n self.assertEqual(pause_calls, [\"swift-proxy\"])", "def test_is_alive_false_on_terminated(self):\n session = mock.Mock()\n session._terminated = True\n session.is_alive = Sl4aSession.is_alive\n self.assertNotEqual(session._terminated, session.is_alive)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns filename for tile with given coordinates
def tilefilename(self, x, y, z):
    tileIndex = x + y * self.tierSizeInTiles[z][0] \
        + self.tileCountUpToTier[z]
    return os.path.join('TileGroup%.0f' % math.floor(tileIndex / 256),
                        '%s-%s-%s.%s' % (z, x, y, self.tileformat))
[ "def get_tile_x_path(tile_dir: str, tile: str) -> str:\n return os.path.join(tile_dir, tile + TILE_X_EXT)", "def get_tile_name_xy(x: int, y: int) -> str:\n return \"srtm_{0:02d}_{1:02d}\".format(x, y)", "def get_tile_name(self, latitude: float, longitude: float) -> str:\n x, y = self.get_tile_xy(latitude, longitude)\n return self.get_tile_name_xy(x, y)", "def filename_for_block_coords(block_x, block_z):\n region_x = block_x >> REGION_WIDTH_BLOCKS_BITS\n region_z = block_z >> REGION_WIDTH_BLOCKS_BITS\n return f\"r.{region_x}.{region_z}.mca\"", "def sprite_path(self) -> str:\n return \"area/{}/tiles/tile{}_{}_0001.png\".format(\n self.sprite_set.name.lower(),\n self.sprite_tile,\n self.sprite_palette + 1,\n )", "def get_fn_from_coords(coords, name=None):\n NS1 = [\"S\", \"N\"][coords[0] > 0]\n EW1 = [\"W\", \"E\"][coords[1] > 0]\n NS2 = [\"S\", \"N\"][coords[2] > 0]\n EW2 = [\"W\", \"E\"][coords[3] > 0]\n new_name = \"%s%0.3g%s%0.3g_%s%0.3g%s%0.3g\" % \\\n (NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])\n if name is not None:\n new_name += '_' + name\n return new_name.replace('.', 'o') + '.tif'", "def _get_file_name(lat, lon) -> Optional[str]:\n ns = 'N' if lat >= 0 else 'S'\n ew = 'E' if lon >= 0 else 'W'\n\n hgt_file = \"%(ns)s%(lat)02d%(ew)s%(lon)03d.hgt\" % {'lat': abs(lat), 'lon': abs(lon), 'ns': ns, 'ew': ew}\n hgt_file_path = os.path.join(DTM_DIR, hgt_file)\n if os.path.isfile(hgt_file_path):\n return hgt_file_path\n else:\n print(f\"{hgt_file_path} not found\")\n return None", "def tiles_info(panoid):\n\n image_url = \"http://cbk0.google.com/cbk?output=tile&panoid={0:}&zoom=5&x={1:}&y={2:}\"\n\n # The tiles positions\n coord = list(itertools.product(range(26), range(13)))\n\n tiles = [(x, y, \"%s_%dx%d.jpg\" % (panoid, x, y), image_url.format(panoid, x, y)) for x, y in coord]\n\n return tiles", "def get_tile_names(tile_count):\n tiles = []\n extension = get_file_extension(os.environ[\"OUTPUT_FORMAT\"])\n for num in range(1, tile_count + 1):\n tiles.append(\"tile_{}.{}\".format(str(num).zfill(3), extension))\n \n return tiles", "def image_filename(cod_setor, coord_id, heading=None):\n if heading is not None:\n return \"IMG_{cod_setor:15d}_{coord_id:03d}_{heading:03d}.jpg\".format(cod_setor=int(cod_setor),coord_id=int(coord_id),heading=int(heading))\n else:\n return \"IMG_{cod_setor:15d}_{coord_id:03d}.jpg\".format(cod_setor=int(cod_setor),coord_id=int(coord_id))", "def tileAt(self, coords):\n return self.tiles.get(tileCenter(coords))", "def generate_map_with_coordinates(topo_params, image_width, image_height, filename):\n return True", "def getImageBaseName(self):\r\n return \"Tiles/{0}.png\".format(self.imageBaseName)", "def get_image_filepath(data_dir, row):\n return os.path.join(data_dir, f\"{row.Species}___{row.Label}\", row.Filename)", "def stich_tiles(panoid, tiles, directory, final_directory):\n\n tile_width = 512\n tile_height = 512\n\n panorama = Image.new('RGB', (26*tile_width, 13*tile_height))\n\n for x, y, fname, url in tiles:\n\n fname = directory + \"/\" + fname\n tile = Image.open(fname)\n\n panorama.paste(im=tile, box=(x*tile_width, y*tile_height))\n\n del tile\n\n# print fname\n\n panorama.save(final_directory + (\"/%s.jpg\" % panoid))\n del panorama", "def tile_coords_to_url(tileX, tileY, zoom, url_template):\n url = ''\n # Random server switching based on choices embedded in the template URL\n switchre = r\"\\{switch:(.*?)\\}\"\n matches = re.finditer(switchre, url_template, re.MULTILINE | re.DOTALL)\n switchedchoice = ''\n for match in matches:\n 
contents = match.group(1)\n switchedchoice = random.choice(contents.split(',')).strip()\n url = re.sub(switchre, switchedchoice, url_template) \n\n # Replace {x}, {y}, and {z} placeholders with the correct values\n url = re.sub(r\"\\{x\\}\", str(tileX), url)\n url = re.sub(r\"\\{y\\}\", str(tileY), url)\n totalrowsatthiszoomlevel = int(math.pow(2, zoom)) \n url = re.sub(r\"\\{-y\\}\", str((totalrowsatthiszoomlevel - 1) - tileY), url)\n url = re.sub(r\"\\{z\\}|\\{zoom\\}\", str(zoom), url)\n\n # replace {quadkey} with the actual item\n url = re.sub(r\"\\{quadkey\\}\", tile_coords_to_quadkey(tileX, tileY, zoom), url)\n \n # Strip prefixes from urls (JOSM-style urls contain tms information in prefix)\n url = re.sub(r\".*https\\:\\/\\/\", 'https://', url)\n url = re.sub(r\".*http\\:\\/\\/\", 'http://', url)\n \n return url", "def image_filename(im_num=0, pos_num=0, channel_num=0, z_num=0):\n filename = \"img_channel{0:03d}_position{1:03d}_time{2:09d}_z{3:03d}.tif\"\n return filename.format(channel_num, pos_num, im_num, z_num)", "def get_tile(self, coords):\n col = self._get_idx(coords[0], False)\n row = self._get_idx(coords[1], True)\n if col is None or row is None:\n return\n\n return self.grid[row][col]", "def get_tiles_dir(dataset_dir: str) -> str:\n return os.path.join(dataset_dir, TILES_DIR)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing).
def generate_metadata(self):

    if not os.path.exists(self.output):
        os.makedirs(self.output)

    if self.options.profile == 'mercator':

        (south, west) = self.mercator.MetersToLatLon(self.ominx, self.ominy)
        (north, east) = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
        (south, west) = (max(-85.05112878, south), max(-180.0, west))
        (north, east) = (min(85.05112878, north), min(180.0, east))
        self.swne = (south, west, north, east)

        # Generate googlemaps.html

        if self.options.webviewer in ('all', 'google') \
                and self.options.profile == 'mercator':
            if not self.options.resume \
                    or not os.path.exists(os.path.join(self.output, 'googlemaps.html')):
                f = open(os.path.join(self.output, 'googlemaps.html'), 'w')
                f.write(self.generate_googlemaps())
                f.close()

        # Generate openlayers.html

        if self.options.webviewer in ('all', 'openlayers'):
            if not self.options.resume \
                    or not os.path.exists(os.path.join(self.output, 'openlayers.html')):
                f = open(os.path.join(self.output, 'openlayers.html'), 'w')
                f.write(self.generate_openlayers())
                f.close()

    elif self.options.profile == 'geodetic':

        (west, south) = (self.ominx, self.ominy)
        (east, north) = (self.omaxx, self.omaxy)
        (south, west) = (max(-90.0, south), max(-180.0, west))
        (north, east) = (min(90.0, north), min(180.0, east))
        self.swne = (south, west, north, east)

        # Generate openlayers.html

        if self.options.webviewer in ('all', 'openlayers'):
            if not self.options.resume \
                    or not os.path.exists(os.path.join(self.output, 'openlayers.html')):
                f = open(os.path.join(self.output, 'openlayers.html'), 'w')
                f.write(self.generate_openlayers())
                f.close()

    elif self.options.profile == 'raster':

        (west, south) = (self.ominx, self.ominy)
        (east, north) = (self.omaxx, self.omaxy)
        self.swne = (south, west, north, east)

        # Generate openlayers.html

        if self.options.webviewer in ('all', 'openlayers'):
            if not self.options.resume \
                    or not os.path.exists(os.path.join(self.output, 'openlayers.html')):
                f = open(os.path.join(self.output, 'openlayers.html'), 'w')
                f.write(self.generate_openlayers())
                f.close()

    # Generate tilemapresource.xml.

    if not self.options.resume \
            or not os.path.exists(os.path.join(self.output, 'tilemapresource.xml')):
        f = open(os.path.join(self.output, 'tilemapresource.xml'), 'w')
        f.write(self.generate_tilemapresource())
        f.close()

    if self.kml:

        # TODO: Maybe problem for not automatically generated tminz
        # The root KML should contain links to all tiles in the tminz level

        children = []
        (xmin, ymin, xmax, ymax) = self.tminmax[self.tminz]
        for x in range(xmin, xmax + 1):
            for y in range(ymin, ymax + 1):
                children.append([x, y, self.tminz])

        # Generate Root KML

        if self.kml:
            if not self.options.resume \
                    or not os.path.exists(os.path.join(self.output, 'doc.kml')):
                f = open(os.path.join(self.output, 'doc.kml'), 'w')
                f.write(self.generate_kml(None, None, None, children))
                f.close()
[ "def mbtiles_metadata(self):\n self.metadata = dict(self.mbtiles.execute('select name, value from metadata;').fetchall())\n (metadata, mime_type) = self.jsonp(self.metadata)\n self.send_file(self.tileset + '.json', metadata, mime_type)\n self.send_file(self.tileset + '/metadata.json', metadata, mime_type)\n self.out('- Uploading metadata.\\n')", "def RegenerateMetaData():\r\n\r\n # Get the posts.\r\n posts = os.listdir(Settings.Settings.webRoot + \"/posts/\") \r\n \r\n # Create meta data dictionary.\r\n metaInfo = {}\r\n \r\n # Grouped by tag. Key is tag, value is list of post md files with tag.\r\n metaInfo[\"byTag\"] = {}\r\n \r\n # Tags per post. Key is post file, value is list of tags.\r\n metaInfo[\"perPostTags\"] = {}\r\n \r\n # Title/filename map. Key is filename, value is post title.\r\n metaInfo[\"byTitle\"] = {}\r\n \r\n # Sorted by date. Value is list of all articles sorted by date.\r\n metaInfo[\"byDate\"] = {}\r\n \r\n # Dictionary of summaries. Key is post filename, value is summary.\r\n metaInfo[\"summaries\"] = {}\r\n \r\n # Collect the data.\r\n for postFile in posts:\r\n \r\n # Open the selected file. \r\n with open(Settings.Settings.webRoot + \"/posts/\" + postFile, 'r') as myfile:\r\n\r\n # Create markdown.\r\n markedDownText = markdown2.markdown(myfile.read(), extras=[\"fenced-code-blocks\", \"metadata\"])\r\n\r\n # Get meta info.\r\n meta = markedDownText.metadata\r\n \r\n pprint.pprint(meta)\r\n \r\n # Add title map entry.\r\n metaInfo[\"byTitle\"][postFile] = meta[\"title\"]\r\n \r\n # Add summary entry.\r\n metaInfo[\"summaries\"][postFile] = meta[\"summary\"]\r\n \r\n # Get list of tags.\r\n tags = [x.strip() for x in meta[\"tags\"].split(',')]\r\n \r\n # Set the per post tags.\r\n metaInfo[\"perPostTags\"][postFile] = [x.strip() for x in meta[\"tags\"].split(',')]\r\n \r\n # Add to tag lists.\r\n for tag in tags:\r\n metaInfo[\"byTag\"].setdefault(tag, [])\r\n metaInfo[\"byTag\"][tag].append(postFile)\r\n \r\n # The date is . separated in Y.M.D format.\r\n dt = datetime.datetime.strptime(meta[\"date\"], '%Y.%m.%d')\r\n \r\n # Pretty severe limitation since we use dates as keys, we can't do two posts\r\n # created on the same day. Warn about it for now.\r\n if dt in metaInfo[\"byDate\"]:\r\n print \"WARNING: already have a post with this date. The old one will not be in the by-date meta dictionary.\"\r\n \r\n # Add it.\r\n metaInfo[\"byDate\"][datetime.datetime.strptime(meta[\"date\"], '%Y.%m.%d')] = postFile\r\n \r\n \r\n # Store the by-date information as a stored dictionary.\r\n #metaInfo[\"byDate\"] = collections.OrderedDict(sorted(metaInfo[\"byDate\"].items()))\r\n # Can't pickle an ordered dict? We will have to sort when we retrieve.\r\n \r\n # Print the meta data for use inspection. \r\n pprint.pprint(metaInfo) \r\n \r\n # Create the pickle.\r\n with open(Settings.Settings.webRoot + \"/meta/meta.pickle\", 'wb') as handle:\r\n pickle.dump(metaInfo, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n \r\n # Test the pickle.\r\n with open(Settings.Settings.webRoot + \"/meta/meta.pickle\", 'rb') as handle:\r\n b = pickle.load(handle)\r\n \r\n # Print the meta data for use inspection. 
\r\n pprint.pprint(b)", "def generate_overview_tiles(self):\n\n print 'Generating Overview Tiles:'\n\n tilebands = self.dataBandsCount + 1\n\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n for tz in range(self.tmaxz - 1, self.tminz - 1, -1):\n (tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]\n tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy\n - tminy))\n\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz - 1, self.tminz - 1, -1):\n (tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]\n yrange = range(tmaxy, tminy - 1, -1)\n if self.options.leaflet:\n yrange = range(tminy, tmaxy + 1)\n for ty in yrange:\n for tx in range(tminx, tmaxx + 1):\n\n if self.stopped:\n break\n\n ti += 1\n tilefilename = os.path.join(self.output, str(tz),\n str(tx), '%s.%s' % (2**tz-1-ty, self.tileext))\n\n if self.options.verbose:\n print (ti, '/', tcount, tilefilename) # , \"( TileMapService: z / x / y )\"\n\n if self.options.resume \\\n and os.path.exists(tilefilename):\n if self.options.verbose:\n print 'Tile generation skiped because of --resume'\n else:\n self.progressbar(ti / float(tcount))\n continue\n\n # Create directories for the tile\n\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n dsquery = self.mem_drv.Create('', 2\n * self.tilesize, 2 * self.tilesize,\n tilebands)\n\n # TODO: fill the null value\n # for i in range(1, tilebands+1):\n # dsquery.GetRasterBand(1).Fill(tilenodata)\n\n dstile = self.mem_drv.Create('', self.tilesize,\n self.tilesize, tilebands)\n\n # TODO: Implement more clever walking on the tiles with cache functionality\n # probably walk should start with reading of four tiles from top left corner\n # Hilbert curve\n\n children = []\n\n # Read the tiles and write them to query window\n\n for y in range(2 * ty, 2 * ty + 2):\n for x in range(2 * tx, 2 * tx + 2):\n (minx, miny, maxx, maxy) = self.tminmax[tz\n + 1]\n if x >= minx and x <= maxx and y >= miny \\\n and y <= maxy:\n # print(os.path.join(self.output,str(tz + 1), str(x), '%s.%s'% (2**(tz+1)-1-y, self.tileext)))\n dsquerytile = \\\n gdal.Open(os.path.join(self.output,\n str(tz + 1), str(x), '%s.%s'\n % (2**(tz+1)-1-y, self.tileext)),\n gdal.GA_ReadOnly)\n\n if self.options.leaflet:\n if ty:\n tileposy = y % (2 * ty) \\\n * self.tilesize\n elif ty == 0 and y == 1:\n tileposy = self.tilesize\n else:\n tileposy = 0\n else:\n if ty == 0 and y == 1 or ty != 0 \\\n and y % (2 * ty) != 0:\n tileposy = 0\n else:\n tileposy = self.tilesize\n\n if tx:\n tileposx = x % (2 * tx) \\\n * self.tilesize\n elif tx == 0 and x == 1:\n tileposx = self.tilesize\n else:\n tileposx = 0\n dsquery.WriteRaster(\n tileposx,\n tileposy,\n self.tilesize,\n self.tilesize,\n dsquerytile.ReadRaster(0, 0,\n self.tilesize, self.tilesize),\n band_list=list(range(1, tilebands\n + 1)),\n )\n children.append([x, y, tz + 1])\n\n self.scale_query_to_tile(dsquery, dstile,\n tilefilename)\n\n # Write a copy of tile to png/jpg\n\n if self.options.resampling != 'antialias':\n\n # Write a copy of tile to png/jpg\n\n self.out_drv.CreateCopy(tilefilename, dstile,\n strict=0)\n\n if self.options.verbose:\n print (\n '\\tbuild from zoom',\n tz + 1,\n ' tiles:',\n (2 * tx, 2 * ty),\n (2 * tx + 1, 2 * ty),\n (2 * tx, 2 * ty + 1),\n (2 * tx + 1, 2 * ty + 1),\n )\n\n # Create a KML file for this tile.\n\n if self.kml:\n f = open(os.path.join(self.output,\n '%d/%d/%d.kml' % (tz, tx, ty)), 'w')\n f.write(self.generate_kml(tx, ty, tz, children))\n f.close()\n\n if 
not self.options.verbose:\n self.progressbar(ti / float(tcount))", "def meta_generator(args):\n meta_name = 'frames_meta.csv'\n df_names = [\"channel_idx\",\n \"slice_idx\",\n \"time_idx\",\n \"channel_name\",\n \"file_name\",\n \"pos_idx\"]\n\n # Get all image names\n im_names = aux_utils.get_sorted_names(args.input)\n # Create empty dataframe\n frames_meta = pd.DataFrame(\n index=range(len(im_names)),\n columns=df_names,\n )\n # Fill dataframe with rows from image names\n for i in range(len(im_names)):\n frames_meta.loc[i] = aux_utils.get_ids_from_imname(\n im_name=im_names[i],\n df_names=df_names,\n order=args.order,\n )\n # Write metadata\n meta_filename = os.path.join(args.input, meta_name)\n frames_meta.to_csv(meta_filename, sep=\",\")", "def make_plugin_metadata(data):\n template = load_template('metadata.template.txt')\n return template.render(data)", "def make_metadata(self, final=False):\n## html = header\n html = '<div id=\"intro\">Jedli {} metadata</div>\\n<br/>\\n'.format(jedli_global.index_type)\n\n if final:\n html += '<p class=\"ltr-text\">Click <a href=\"{}\">here</a> for an overview table of all searched sources and results</p>\\n'.format(self.table_pth)\n html += '<p class=\"ltr-text\">Click <a href=\"{}\">here</a> for graphs of the results</p>\\n'.format(self.graph_pth)\n html += '<p class=\"ltr-text\">Search words: </p>\\n'.format(self.graph_pth)\n\n html += '<div id=\"source_table\">\\n'\n html += ' <table>\\n'\n\n # make table header row:\n html+= ' <tr>\\n'\n html+= ' <th>operator</th>\\n'\n if jedli_global.index_type == \"Highlighter\":\n html+= ' <th>highlight color</th>\\n'\n html+= ' <th>hidden tag</th>\\n'\n html+= ' <th>search word(s)</th>\\n'\n html+= ' <th>checklist name</th>\\n'\n html+= ' <th>search options</th>\\n'\n if jedli_global.show_regex:\n html+= ' <th>regex used</th>\\n'\n html+= ' </tr>\\n'\n\n # fill the table: \n for i, dic in enumerate(self.variables.search_values):\n html+= ' <tr>\\n'\n if i > 0:\n html+= ' <td style=\"text-align:left\">{}</td>\\n'.format(dic[\"and_or_not\"])\n else:\n html+= ' <td style=\"text-align:left\">-</td>\\n'\n if jedli_global.index_type == \"Highlighter\":\n html+= ' <td><span style=\"background-color: {0}\">{0}</span></td>\\n'.format(dic[\"color\"])\n html+= ' <td>{}</td>\\n'.format(dic[\"hidden_sign\"])\n html+= ' <td>{}</td>\\n'.format(\"<br/>\".join(dic[\"words\"]))\n if dic[\"checklist_name\"]:\n html+= ' <td style=\"text-align:left\">{}</td>\\n'.format(dic[\"checklist_name\"])\n else:\n html+= ' <td>--</td>\\n'\n html+= ' <td style=\"text-align:left\">{}</td>\\n'.format(self.make_search_options_cell(dic))\n if jedli_global.show_regex:\n html+= ' <td>{}</td>\\n'.format(\"<br/>\".join(dic[\"words_regex\"]))\n html+= ' </tr>\\n'\n html += ' </table>\\n'\n html += '</div>\\n<br/>\\n'\n\n if final:\n html += '<p class=\"ltr-text\">Search date: {}</p>'.format(time.asctime( time.localtime(time.time()) ))\n\n## html += ' Click <a href=\"{}\">here</a> for an overview of all sources searched<br/>\\n'.format(self.table_pth)\n## html += '</div>'\n## html += '<div id=\"top\"><a href=\"..\\{}\">back to overview</a></div><br/>\\n'.format(os.path.split(self.new_file)[1])\n\n## html += footer\n## self.write_html(self.metadata_pth, html)\n return html", "def write_layout(self):\n # Welcome message\n if self.verbose > 0:\n print(\"[info] Generating layout in {0}...\".format(self.layoutdir))\n\n # Top selection panel\n indices = [\n \"\"\"<li><a href=\"{{{{ pathto('generated/{0}') }}}}\">\"\"\"\n 
\"\"\"{1}</a></li>\"\"\".format(x, self.title_for(x))\n for x in self.module_names]\n\n # Carousel items\n carousel_items = [item for item in os.listdir(self.carouselpath)]\n if len(carousel_items) == 0:\n raise IOError(\"No data found in folder '{0}'.\".format(\n self.carouselpath))\n images = []\n indicators = []\n for cnt, item in enumerate(carousel_items):\n if cnt == 0:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='0' \"\n \"class='active'></li>\")\n images.append(\n \"\"\"<div class=\"active item\">\"\"\"\n \"\"\"<a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</div></a>\"\"\" % item)\n else:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='{0}' \"\n \"</li>\".format(cnt))\n images.append(\n \"\"\"<div class=\"item\"><a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</a></div>\"\"\" % item)\n\n # Create layout maping\n pysphinxdoc_info = {}\n info_file = os.path.join(os.path.dirname(__file__), \"info.py\")\n with open(info_file) as open_file:\n exec(open_file.read(), pysphinxdoc_info)\n layout_info = {\n \"NAME_LOWER\": self.root_module_name,\n \"NAME_UPPER\": self.root_module_name.upper(),\n \"INDEX\": \"\\n\".join(indices),\n \"CAROUSEL_INDICATORS\": \"\\n\".join(indicators),\n \"CAROUSEL_IMAGES\": \"\\n\".join(images),\n \"DESCRIPTION\": self.rst2html(self.release_info[\"DESCRIPTION\"]),\n \"SUMMARY\": self.rst2html(self.release_info[\"SUMMARY\"]),\n \"LOGO\": self.root_module_name,\n \"URL\": self.release_info[\"URL\"],\n \"EXTRAURL\": (self.release_info.get(\"EXTRAURL\") or\n pysphinxdoc_info[\"URL\"]),\n \"EXTRANAME\": self.release_info.get(\"EXTRANAME\") or \"PYSPHINXDOC\"\n }\n\n # Start writting the layout\n template_layout_file = os.path.join(\n os.path.dirname(__file__), \"resources\", \"layout.html\")\n layout_file = os.path.join(self.layoutdir, \"layout.html\")\n self.write_from_template(layout_file, template_layout_file,\n layout_info)", "def meta_load_socrata(self):\n import json\n\n meta = self.filesystem.download('meta')\n\n with open(meta) as f:\n d = json.load(f)\n\n md = self.metadata\n md.about.title = d['name']\n md.about.summary = d['description']\n\n md.write_to_dir()", "def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()", "def __init__(self, source, destination, layouts):\r\n for root, dirs, files in os.walk(source):\r\n for name in files:\r\n content = open( os.path.join(root, name) ).read()\r\n # Iterate yaml front matter\r\n for config in yaml.load_all(content):\r\n if type(config) is dict:\r\n layout = Template(filename=os.path.join(layouts, config['layout']+\".html\"))\r\n pieces = re.split(\"---\\n\", content) # expect [blank, yaml, content]\r\n html = markdown.markdown(pieces[2])\r\n 
# Save page\r\n page = open(os.path.join(destination, name), 'w')\r\n page.write(layout.render(data=config, content=html))\r\n page.close()", "def next_metadata():\n\n print('\\nReading .hmp files')\n folder = 'modsources'\n\n for filename in os.listdir(folder):\n if not re.match(r'.+\\.hmp', filename):\n continue\n with open(os.path.join(folder, filename)) as file:\n font_name = _create_font_name(filename)\n print(f'\\nDefined file {filename} as font {font_name}')\n for i, number in enumerate(_iterate_file(file)):\n ascii_value = i + ord(' ')\n yield Metadata(number, ascii_value, font_name)", "def parse_mm_metadata(metadata_dir, file_pattern=\"*metadata*.txt\"):\n\n if not os.path.exists(metadata_dir):\n raise FileExistsError(\"Path '%s' does not exists.\" % metadata_dir)\n\n # todo: are there cases where there are multiple metadata files for one dataset?\n metadata_paths = list(Path(metadata_dir).glob('**/' + file_pattern))\n metadata_paths = sorted(metadata_paths)\n\n if metadata_paths == []:\n raise FileExistsError(\"No metadata files matching pattern '%s' found.\" % file_pattern)\n\n # open first metadata and get roi_size few important pieces of information\n with open(metadata_paths[0], 'r') as f:\n datastore = json.load(f)\n\n # get summary data\n summary = datastore['Summary']\n dims = {}\n for k, entry in summary['IntendedDimensions'].items():\n dims[k] = entry\n\n for k, entry in summary['UserData'].items():\n dims[k] = entry['scalar']\n\n # run through each metadata file to figure out settings for stage positions and individual images\n initialized = False\n multipage_tiff_style = False\n titles = []\n userdata_titles = []\n extra_titles = []\n data = []\n for filename in metadata_paths:\n\n with open(filename, 'r') as f:\n datastore = json.load(f)\n\n for k, entry in datastore.items():\n\n # skip items we don't care much about yet\n if k == 'Summary':\n continue\n\n # separate coordinate data stored in single page TIFF files style metadata\n if re.match(\"Coords-.*\", k):\n continue\n\n # get column titles from metadata\n # get titles\n if not initialized:\n # check for multipage vs single page tiff style\n m = re.match('FrameKey-(\\d+)-(\\d+)-(\\d+)', k)\n if m is not None:\n multipage_tiff_style = True\n\n # get titles\n for kk in entry.keys():\n if kk == 'UserData':\n for kkk in entry[kk].keys():\n userdata_titles.append(kkk)\n else:\n titles.append(kk)\n\n if multipage_tiff_style:\n # these\n extra_titles = ['Frame', 'FrameIndex', 'PositionIndex', 'Slice', 'SliceIndex', 'ChannelIndex']\n extra_titles += [\"directory\"]\n initialized = True\n\n # accumulate data\n data_current = []\n for t in titles:\n data_current.append(entry[t])\n for t in userdata_titles:\n # todo: maybe need to modify this more generally for non-scalar types...\n data_current.append(entry['UserData'][t]['scalar'])\n\n if multipage_tiff_style:\n # parse FrameKey information\n m = re.match('FrameKey-(\\d+)-(\\d+)-(\\d+)', k)\n\n time_index = int(m.group(1))\n channel_index = int(m.group(2))\n z_index = int(m.group(3))\n\n m = re.match('Pos-(\\d+)', entry['PositionName'])\n if m is not None:\n position_index = int(m.group(1))\n else:\n position_index = 0\n\n data_current += [time_index, time_index, position_index, z_index, z_index, channel_index]\n\n # this is also stored in \"extra titles\"\n data_current += [os.path.dirname(filename)]\n\n\n # combine all data\n data.append(data_current)\n\n # have to do some acrobatics to get slice in file info\n userdata_titles = ['User' + t for t in userdata_titles]\n 
image_metadata = pd.DataFrame(data, columns=titles+userdata_titles+extra_titles)\n\n # for TIF files containing multiple images, we need the position in the file for each image\n fnames = image_metadata['FileName'].unique()\n\n image_pos_in_file = np.zeros((image_metadata.shape[0]), dtype=np.int)\n\n if multipage_tiff_style:\n for fname in fnames:\n inds = (image_metadata['FileName'] == fname)\n current_pos = image_metadata['ImageNumber'][inds]\n image_pos_in_file[inds] = current_pos - current_pos.min()\n\n image_metadata['ImageIndexInFile'] = image_pos_in_file\n\n return image_metadata, dims, summary", "def loadMeta(self):\r\n config = ConfigParser()\r\n config.read(\"data/server.meta\")\r\n specs = ConfigParser()\r\n specs.read(\"data/spectators.meta\")\r\n # Read in the worlds\r\n if config.has_section(\"worlds\"):\r\n for name in config.options(\"worlds\"):\r\n self.worlds[name] = None\r\n if name is \"main\":\r\n self.main_loaded = True\r\n else:\r\n self.worlds[\"main\"] = None\r\n if not self.main_loaded:\r\n self.worlds[\"main\"] = None\r\n # Read in the directors\r\n if config.has_section(\"directors\"):\r\n for name in config.options(\"directors\"):\r\n self.directors.add(name)\r\n # Read in the admins\r\n if config.has_section(\"admins\"):\r\n for name in config.options(\"admins\"):\r\n self.admins.add(name)\r\n # Read in the mods\r\n if config.has_section(\"mods\"):\r\n for name in config.options(\"mods\"):\r\n self.mods.add(name)\r\n # Read in the advanced builders\r\n if config.has_section(\"advbuilders\"):\r\n for name in config.options(\"advbuilders\"):\r\n self.advbuilders.add(name)\r\n if config.has_section(\"silenced\"):\r\n for name in config.options(\"silenced\"):\r\n self.silenced.add(name)\r\n # Read in the spectators\r\n if specs.has_section(\"spectators\"):\r\n for name in specs.options(\"spectators\"):\r\n self.spectators.add(name)\r\n # Read in the bans\r\n if config.has_section(\"banned\"):\r\n for name in config.options(\"banned\"):\r\n self.banned[name] = config.get(\"banned\", name)\r\n # Read in the ipbans\r\n if config.has_section(\"ipbanned\"):\r\n for ip in config.options(\"ipbanned\"):\r\n self.ipbanned[ip] = config.get(\"ipbanned\", ip)", "def write_index_html(wk_dir,region_dict,ext):\n metric_file=metrics_dir_name+'/histogram_metric.json'\n fig_list=[figure_dir_name+'/'+f for f in os.listdir(wk_dir+'/'+figure_dir_name) if f.endswith(ext)]\n hist_metric_exists=os.path.exists(os.path.join(wk_dir,metric_file))\n # Extensive descriptions are set in another function.\n intr_txt,mtrc_txt,hst_mp_txt,hst_txt,hst_df_txt=set_descriptive_text()\n\n # list unique keyword to identify plots for each category\n fig_keys=[\"contributions\",\"1dhistograms\",\"differences\"]\n subtitle_list=[\"Histogram Maps\",\"All Histograms\",\"Histogram Difference\"]\n subheading_list=[\"actual\",\"fractional\"]\n text_list=[hst_mp_txt,hst_txt,hst_df_txt]\n\n # Initialize html text\n html_text=[\n '<html>\\n','<body>','<head><title>ASoP-Spectral</title></head>\\n',\n '<br><h1>ASoP-Spectral results</h1>\\n',intr_txt]\n contents = [\n '<h2>Contents</h2>\\n',\n '<dl>\\n','<dt><a href=\"#Figures\">Figures</a></dt>\\n',\n '<dd><a href=\"#Histogram-Maps\">Histogram Maps</a></dd>\\n',\n '<dd><a href=\"#All-Histograms\">All Histograms</a></dd>\\n',\n '<dd><a href=\"#Histogram-Difference\">Histogram Difference</a></dd>\\n',\n '</dl>\\n']\n if hist_metric_exists:\n contents.insert(2,'<dt><a href=\"#Metrics\">Metrics</a></dt>\\n')\n contents.insert(4,'<dd><a 
href=\"#Histogram-Metric-Maps\">Histogram Metric Maps</a></dd>\\n')\n html_text.extend(contents)\n\n # Check for optional histogram metric files\n if hist_metric_exists:\n html_text.extend([\n '<section id=\"Metrics\">\\n',\n '<h2>Metrics</h2>\\n',\n mtrc_txt,\n '<br><a href=\"{0}\" target=\"_blank\" >{0}</a>\\n'.format(metric_file),\n '</section>\\n',\n '<section id=\"Figures\">\\n',\n '<h2>Figures</h2>\\n',\n '<section id=\"Histogram-Metric-Maps\">\\n',\n '<h3>Histogram Metric Maps</h3>'])\n sub_list=[f for f in fig_list if ('histogram_metric' in f)]\n for fig in sub_list:\n html_text.append(\n '<p><a href=\"{0}\" target=\"_blank\" alt={0}><img src=\"{0}\" '.format(fig)\n +'width=\"647\" alt=\"{0}\"></a></p>\\n'.format(fig))\n else:\n html_text.append('<section id=\"Figures\">\\n')\n html_text.append('<h2>Figures</h2>\\n')\n\n # Build the rest of the titles, subtitles, text, and figures.\n for title,kword,desc in zip(subtitle_list,fig_keys,text_list):\n html_text.extend([\n '<section id=\"'+title.replace(' ','-')+'\">\\n',\n '<h3>{0}</h3>\\n'.format(title),\n '<p>{0}</p>'.format(desc)])\n plot_list=[f for f in fig_list if (kword in f)]\n for region in region_dict:\n html_text.append('<h4>{0}</h4>\\n'.format(region.replace('_',' ')))\n for heading in subheading_list:\n html_text.append('<h5>{0} contribution</h5>\\n'.format(heading.capitalize()))\n sub_list=[f for f in plot_list if ((heading in f) and (region in f))]\n for fig in sub_list:\n html_text.append('<p><a href=\"{0}\" target=\"_blank\" alt={0}><img src=\"{0}\" width=\"647\" alt=\"{0}\"></a></p>\\n'.format(fig))\n html_text.append('</section>\\n')\n html_text.append('</section>\\n')\n\n html_text.append('</body>\\n</html>\\n')\n filename=wk_dir+\"/index.html\"\n with open(filename,\"w\") as html_page:\n html_page.writelines(html_text)\n return", "def _create_page_htmls(self):\n for page in self.pages:\n content = self.templates['page'].render(page=page, site=self.site)\n path = os.path.join(BASE_DIR, self.paths['output'],\n page.slug, 'index.html')\n write_to_path(content, path)", "def reconstruct_meta_file(self):\n meta_file_content = {}\n\n # Check if `NestedMap` were saved\n map_path = os.path.join(self.objects_dir_path, 'map', 'dictionary.log')\n if os.path.isfile(map_path):\n meta_file_content['dictionary.log'] = {\n 'name': 'dictionary',\n 'type': ['map', 'nested_map'],\n 'data': None,\n 'data_path': 'map',\n }\n\n # Collect metrics meta info\n metrics_info = self.records_storage.get_artifacts_names()\n for metric_name, context_items in metrics_info.items():\n meta_file_content[metric_name] = {\n 'name': metric_name,\n 'type': 'metrics',\n 'data': None,\n 'data_path': '__AIMRECORDS__',\n 'format': {\n 'artifact_format': 'aimrecords',\n 'record_format': 'protobuf',\n },\n 'context': [list(c.items()) for c in context_items],\n }\n return meta_file_content", "def get_files_to_generate(self):\r\n pass", "def test_meta_output(self):\n jsonld_path = os.path.join(testscriptstempdir, 'metajson.jsonld')\n rdf_path = os.path.join(testscriptstempdir, 'metardf.ttl')\n meta_context_path = os.path.join(testscriptstempdir, 'metacontext.jsonld')\n\n # Generate an image of the metamodel\n gen = ContextGenerator(source_yaml_path, importmap=BIOLINK_IMPORT_MAP)\n base = gen.schema.id\n if base[-1] not in '/#':\n base += '/'\n base += gen.schema.name\n with open(meta_context_path, 'w') as tfile:\n tfile.write(gen.serialize())\n with open(jsonld_path, 'w') as tfile:\n tfile.write(JSONLDGenerator(source_yaml_path, 
fmt=JSONLDGenerator.valid_formats[0],\n importmap=BIOLINK_IMPORT_MAP).serialize(context=meta_context_path))\n g = Graph()\n g.load(jsonld_path, format=\"json-ld\")\n g.serialize(rdf_path, format=\"ttl\")\n g.bind('meta', METAMODEL_NAMESPACE)\n new_ttl = g.serialize(format=\"turtle\").decode()\n new_g = Graph()\n new_g.parse(data=new_ttl, format=\"turtle\")\n self.check_size(g, new_g, URIRef(base), 11, 91, 13, \"meta\")", "def writeMetaData(self):\r\n print \"--------------------------------------\"\r\n print \"Writing Meta Data to Images\"\r\n print \"--------------------------------------\"\r\n #save original location so that you can return to your starting location after \r\n #running Exiftool\r\n original_location = os.getcwd()\r\n parent = self.picDir\r\n exifName = \"\"\r\n #check what os the user is running to account for terminal command differences\r\n if platform.system() == \"Windows\":\r\n exifName = \"exiftool.pl\"\r\n else:\r\n exifName = \"./exiftool\"\r\n #make sure the directories are in the correct format\r\n parent = parent.strip().strip(\"'\").strip('\"')\r\n #navigate to the file that the user's exif program is located in \r\n #make a list of all of the folders in this directory\r\n path_list = [x for x in os.listdir(parent)]\r\n exifName + \" -stay_open True -@ \" + self.argFile\r\n for item in path_list:\r\n if self.copyText not in item:\r\n data = self.tagData[item]\r\n path = os.path.join(parent, item)\r\n with open(self.argFile, \"a+\") as f:\r\n cmd =\"-q\\n-overwrite_original\\n-RegionName=\" + data[0] + '\\n' + path + '\\n'\r\n f.write(cmd)\r\n #cmd = \"-RegionType=Face\"+ '\\n' + path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAppliedToDimensionsW=\" + data[3] + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAppliedToDimensionsH=\" + data[4] + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAppliedToDimensionsUnit=pixel\" + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAreaX=0.5\" + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAreaY=0.5\" + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAreaW=1\"+ '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAreaH=1\" + '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #cmd = \"-RegionAreaUnit=normalized\"+ '\\n'+ path + '\\n'\r\n #f.write(cmd)\r\n #Adds ID number and Class Year\r\n cmd = \"-Subject=\"+ data[1]+\",\"+data[2] + '\\n'+ path + '\\n'\r\n f.write(cmd)\r\n \r\n f.write(\"-execute\\n\")\r\n print \"--------------------------------------\"\r\n print \"ArgFile Made\"\r\n print \"--------------------------------------\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
def generate_base_tiles(self): print 'Generating Base Tiles:' if self.options.verbose: # mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY # px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz) # print "Pixel coordinates:", px, py, (mx, my) print '' print 'Tiles generated from the max zoom level:' print '----------------------------------------' print '' # Set the bounds (tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz] # Just the center tile # tminx = tminx+ (tmaxx - tminx)/2 # tminy = tminy+ (tmaxy - tminy)/2 # tmaxx = tminx # tmaxy = tminy ds = self.out_ds tilebands = self.dataBandsCount + 1 querysize = self.querysize if self.options.verbose: print ('dataBandsCount: ', self.dataBandsCount) print ('tilebands: ', tilebands) # print tminx, tminy, tmaxx, tmaxy tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy)) # print tcount ti = 0 tz = self.tmaxz yrange = range(tmaxy, tminy - 1, -1) if self.options.leaflet: yrange = range(tminy, tmaxy + 1) for ty in yrange: for tx in range(tminx, tmaxx + 1): if self.stopped: break ti += 1 tilefilename = os.path.join(self.output, str(tz), str(tx), '%s.%s' % ((2**tz-1-ty), self.tileext)) if self.options.verbose: print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )" if self.options.resume and os.path.exists(tilefilename): if self.options.verbose: print 'Tile generation skiped because of --resume' else: self.progressbar(ti / float(tcount)) continue # Create directories for the tile if not os.path.exists(os.path.dirname(tilefilename)): os.makedirs(os.path.dirname(tilefilename)) if self.options.profile == 'mercator': # Tile bounds in EPSG:900913 b = self.mercator.TileBounds(tx, ty, tz) elif self.options.profile == 'geodetic': b = self.geodetic.TileBounds(tx, ty, tz) # print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % ( b[0], b[1], b[2], b[3], "tiles.vrt", tz, tx, ty) # Don't scale up by nearest neighbour, better change the querysize # to the native resolution (and return smaller query tile) for scaling if self.options.profile in ('mercator', 'geodetic'): (rb, wb) = self.geo_query(ds, b[0], b[3], b[2], b[1]) nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent if self.options.verbose: print ('\tNative Extent (querysize', nativesize, '): ', rb, wb) # Tile bounds in raster coordinates for ReadRaster query (rb, wb) = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize, ) (rx, ry, rxsize, rysize) = rb (wx, wy, wxsize, wysize) = wb else: # 'raster' profile: tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom xsize = self.out_ds.RasterXSize # size of the raster in pixels ysize = self.out_ds.RasterYSize if tz >= self.nativezoom: querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize) rx = tx * tsize rxsize = 0 if tx == tmaxx: rxsize = xsize % tsize if rxsize == 0: rxsize = tsize rysize = 0 if ty == tmaxy: rysize = ysize % tsize if rysize == 0: rysize = tsize if self.options.leaflet: ry = ty * tsize else: ry = ysize - ty * tsize - rysize (wx, wy) = (0, 0) (wxsize, wysize) = (int(rxsize / float(tsize) * self.tilesize), int(rysize / float(tsize) * self.tilesize)) if not self.options.leaflet: if wysize != self.tilesize: wy = self.tilesize - wysize if self.options.verbose: print ('\tReadRaster Extent: ', (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)) # Query is in 'nearest neighbour' but can be bigger in then the tilesize # We scale down the query to the tilesize by supplied algorithm. 
# Tile dataset in memory dstile = self.mem_drv.Create('', self.tilesize, self.tilesize, tilebands) data = ds.ReadRaster( rx, ry, rxsize, rysize, wxsize, wysize, band_list=list(range(1, self.dataBandsCount + 1)), ) alpha = self.alphaband.ReadRaster( rx, ry, rxsize, rysize, wxsize, wysize, ) if self.tilesize == querysize: # Use the ReadRaster result directly in tiles ('nearest neighbour' query) dstile.WriteRaster( wx, wy, wxsize, wysize, data, band_list=list(range(1, self.dataBandsCount + 1)), ) dstile.WriteRaster( wx, wy, wxsize, wysize, alpha, band_list=[tilebands], ) else: # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID) # the ReadRaster function returns high-quality raster (not ugly nearest neighbour) # TODO: Use directly 'near' for WaveLet files # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo dsquery = self.mem_drv.Create('', querysize, querysize, tilebands) # TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported) # for i in range(1, tilebands+1): # dsquery.GetRasterBand(1).Fill(tilenodata) dsquery.WriteRaster( wx, wy, wxsize, wysize, data, band_list=list(range(1, self.dataBandsCount + 1)), ) dsquery.WriteRaster( wx, wy, wxsize, wysize, alpha, band_list=[tilebands], ) # print('-'+tilefilename+'-') self.scale_query_to_tile(dsquery, dstile, tilefilename) del dsquery del data if self.options.resampling != 'antialias': # Write a copy of tile to png/jpg self.out_drv.CreateCopy(tilefilename, dstile, strict=0) del dstile # Create a KML file for this tile. if self.kml: kmlfilename = os.path.join(self.output, str(tz), str(tx), '%d.kml' % ty) if not self.options.resume \ or not os.path.exists(kmlfilename): f = open(kmlfilename, 'w') f.write(self.generate_kml(tx, ty, tz)) f.close() if not self.options.verbose: self.progressbar(ti / float(tcount))
[ "def divideIntoTiles(inputRaster,dim):\r\n tiles=[]\r\n xmin=0\r\n xmax=dim\r\n ymin=0\r\n ymax=dim\r\n #iterate down the Y values\r\n for i in range(0,inputRaster.shape[0]//dim):\r\n #iterate across the X values\r\n for j in range(0,inputRaster.shape[1]//dim):\r\n coords=pixelCoordinates(xmin,xmax,ymin,ymax)\r\n tiles.append(coords)\r\n xmin+=dim\r\n xmax+=dim \r\n xmin=0\r\n xmax=dim\r\n ymin+=dim\r\n ymax+=dim\r\n return(tiles)", "def create_individual_building_raster(self):\n canvas = np.zeros((self.max_y - self.min_y + 1,\n self.max_x - self.min_x + 1))\n for point in self.points:\n canvas[point[1] - self.min_y, point[0] - self.min_x] = 1\n return canvas", "def _prep_tiles(self):\r\n # todo: write this. expected output is a flat iterable.\r\n # todo: explore turning flatten() into generator\r\n\r\n if self._bounds and not self._tiles:\r\n # build tile list from bounds\r\n self._zoom = self._detail + Pin.find_span_zoom(self._bounds)\r\n self._tiles = Tile.from_pins(self._bounds, self._zoom) # get the tiles covering the span\r\n Tile.new_tile_q.join() # wait for tiles to arrive\r\n\r\n if self._tiles and not self._bounds:\r\n sw_pin = Pin.from_tile_coord(np.min(self._X), np.max(self._Y) + 1, self._zoom)\r\n ne_pin = Pin.from_tile_coord(np.max(self._X) + 1, np.min(self._Y), self._zoom)\r\n self._bounds = sw_pin, ne_pin\r\n\r\n assert all(isinstance(t, Tile) for t in self._tiles), f'{self._tiles}' # all objects must be tiles\r\n self._X, self._Y, zooms = np.asarray(list(self._tiles)).T # asarray won't work on sets. ugh.\r\n assert all(zooms == zooms[0]) # all zooms must be the same\r\n self._zoom = zooms[0]", "def gail_lattice_gen(n_min, n_max, d, z):\n m_low = floor(log2(n_min))+1 if n_min > 0 else 0\n m_high = ceil(log2(n_max))\n x_lat_full = vstack([gen_block(m,z) for m in range(int(m_low),int(m_high)+1)])\n cut1 = int(floor(n_min-2**(m_low-1))) if n_min>0 else 0\n cut2 = int(cut1+n_max-n_min)\n x_lat = x_lat_full[cut1:cut2,:]\n return x_lat", "def init_tiles(self):\n for simple in [Game.TILE_SIMPLE_DOT, Game.TILE_SIMPLE_BAMBOO, Game.TILE_SIMPLE_CHAR]:\n for value in range(Game.SIZE_SIMPLE):\n self.tiles += [(simple, value) for i in range(4)]\n\n for value in ['east', 'west', 'north', 'south']:\n self.tiles += [(Game.TILE_HONOR_WIND, value) for i in range(4)]\n self.tiles += [(Game.TILE_BONUS_FLOWER, value)]\n self.tiles += [(Game.TILE_BONUS_SEASON, value)]\n\n for value in ['red', 'green', 'white']:\n self.tiles += [(Game.TILE_HONOR_DRAGON, value) for i in range(4)]\n\n random.shuffle(self.tiles)\n return", "def output_for_visualization(self, tile_shape, tile_spacing):\n \n size = list(self.bases.pooling_units[0].size())\n all_outputs = np.zeros((self.num_bases, size[1], size[2]))\n\n for i in range(all_outputs.shape[0]):\n all_outputs[i] = self.bases.pooling_units[0,i,:,:].numpy()\n \n img_shape = self.bases.pooling_units[0,0].shape\n all_outputs = utils.tile_raster_images(all_outputs, img_shape, tile_shape, tile_spacing)\n all_outputs = utils.normalize_image(all_outputs)\n return all_outputs", "def generate_tiles(self, state):\n rows = state.map.split()[::-1] # Y-axis is positive, so start at the bottom\n height = len(rows)\n width = len(rows[0])\n self.tiles = [[None for _ in range(height)] for _ in range(width)]\n for y, row in enumerate(rows):\n for x, char in enumerate(row):\n self.tiles[x][y] = Tile(char, x, y)", "def extractTile(inputRaster,pixelCoordinates):\r\n tile = inputRaster[pixelCoordinates.ymin:pixelCoordinates.ymax,\\\r\n 
pixelCoordinates.xmin:pixelCoordinates.xmax, :]\r\n #clip because NA values are encoded as a super negative value\r\n tile=np.clip(tile,0,None)\r\n tile=np.swapaxes(tile,0,2)\r\n tile=tile[np.newaxis]\r\n tile=torch.from_numpy(tile).float()\r\n return(tile)", "def RangeTemplate(n, start=32, branch=4, shared=False):\n rows = []\n width = start\n idx = 1\n while width <= n:\n for i in range(0, n-width//2, width//2):\n row = np.zeros(n, dtype=int)\n row[i:i+width] = np.arange(width) + idx\n if not shared: idx += width\n rows.append(row)\n if shared: idx += width\n width *= branch\n return AugmentedIdentity(np.vstack(rows))", "def reconstruct( pyramid ):\n\n #TODO: Implement Laplacian pyramid reconstruction using gausspyr_expand\n levels = len(pyramid) - 1 #highest level\n\n cur_img = pyramid[levels].copy()\n for i in range(levels, 0, -1):\n expand_cur = gausspyr_expand(cur_img, pyramid[i-1].shape)\n next_img = expand_cur + pyramid[i-1]\n cur_img = next_img\n \n img = next_img\n\n return img", "def voxel_superset(s):\n # return ndim_grid(np.trunc(s.min(0)) - 1, np.trunc(s.max(0)) + 1)\n return ndim_grid(np.round(s.min(0)) - 1, np.round(s.max(0)) + 1)", "def custom_grid():\n\n return np.arange(1, 82, dtype=np.int32).reshape((9, 9))", "def make_tiles(self):\n num_tiles = self._puzzle_height * self._puzzle_width\n #subsurface is a ract(left, top, width, height\n \n for idx in xrange(num_tiles):\n self._tiles.append(self._tiles_sprite.subsurface(\n (idx * TILE_SIZE, 0, TILE_SIZE, TILE_SIZE)))", "def _split_terrain(self, terrain):\n textures = []\n (terrain_width, terrain_height) = terrain.size\n texture_resolution = terrain_width / 16\n for y in range(16):\n for x in range(16):\n left = x*texture_resolution\n upper = y*texture_resolution\n right = left+texture_resolution\n lower = upper+texture_resolution\n region = terrain.transform(\n (16, 16),\n Image.EXTENT,\n (left,upper,right,lower),\n Image.BICUBIC)\n textures.append(region)\n\n return textures", "def create_new_raster_from_base(input_raster, output_raster, write_array):\n\n with rasterio.open(input_raster, 'r') as src:\n with rasterio.open(output_raster, 'w',\n driver=src.driver,\n width=src.width,\n height=src.height,\n count=1,\n crs=src.crs,\n dtype=np.uint8,\n transform=src.transform) as dst:\n dst.write(write_array[:, :], 1)", "def initialize_tiles(self):\n for x in range(0, self.width, self.tile_size):\n for y in range(0, self.height, self.tile_size):\n self.group_tiles.add(Tile(x, y, self.tile_size))", "def create_tiles(x_tiles, y_tiles):\n tiles = []\n total = x_tiles * y_tiles\n counter = 0\n\n print(\"create_tiles from '{}' -> '{}'\".format(0, total))\n for j in range(x_tiles):\n for i in range(y_tiles):\n tiles.append(Tile(counter, i, j))\n counter += 1\n\n return tiles", "def get_tiles_ranges(self, zoom):\n bb = array.array('l')\n filedata = self.zip.read('bbox.bin')\n bb.fromstring(filedata)\n logging.debug('BB: %s', list(bb))\n minpx, minpy = m.MetersToPixels(bb[0], bb[1], zoom) \n maxpx, maxpy = m.MetersToPixels(bb[2], bb[3], zoom) \n logging.debug('PX: %s, %s, %s, %s', minpx, minpy, maxpx, maxpy)\n\n mintx, minty = m.MetersToTile(bb[0], bb[1], zoom) \n maxtx, maxty = m.MetersToTile(bb[2], bb[3], zoom) \n return mintx, minty, maxtx, maxty", "def draw_tile_backgrounds(self, tiles):\n\n def process_tile(tile):\n h = tile.height\n h_index = (h - self.parent.min_height) / (self.parent.max_height - self.parent.min_height)\n\n rgb_rand_1 = random.randint(0, self.ocean_noise)\n\n height_rgb = [0, 0, 0]\n height_rgb[0] = 
self.height_rgb_low[0] + h_index * (self.height_rgb_high[0] - self.height_rgb_low[0])\n height_rgb[1] = self.height_rgb_low[1] + h_index * (self.height_rgb_high[1] - self.height_rgb_low[1])\n height_rgb[2] = self.height_rgb_low[2] + h_index * (self.height_rgb_high[2] - self.height_rgb_low[2])\n\n water_rgb = (rgb_rand_1, rgb_rand_1, 255)\n if self.screen_mode == \"dark\":\n water_rgb = (rgb_rand_1 // 2, rgb_rand_1 // 2, 150)\n if self.screen_mode == \"martin\":\n water_rgb = (195 + rgb_rand_1 * 0.5, 234 + rgb_rand_1 * 0.5, 251)\n\n fillColors = [\n height_rgb, # Ground\n height_rgb, # Rail\n self.road_tile_rgb, # Road\n height_rgb, # Town building\n height_rgb, # Trees\n self.station_rgb, # Stations\n water_rgb, # Water\n height_rgb, # Void\n self.industry_rgb, # Industries\n self.torb_rgb, # Tunnel/bridge\n height_rgb, # Objects\n ]\n fillColor = fillColors[tile.kind % len(fillColors)]\n if tile.kind == 1:\n rail = tile.occupant\n if rail.is_depot:\n fillColor = self.rail_depot_rgb\n\n if tile.kind == 5:\n station = tile.occupant\n if station.station_type == 0:\n fillColor = self.rail_station_rgb\n if station.station_type == 1:\n fillColor = self.airport_rgb\n if station.station_type == 2:\n fillColor = self.bus_station_rgb\n if station.station_type == 3:\n fillColor = self.truck_station_rgb\n if station.station_type == 4:\n fillColor = self.heliport_rgb\n if station.station_type == 5:\n fillColor = self.seaport_rgb\n\n self.draw_square(tile, fillColor)\n if tile.kind == 1:\n rail = tile.occupant\n if not rail.is_depot:\n self.draw_rail_background(tile)\n\n if self.parent.show_progress_bar:\n with alive_bar(len(tiles)) as abar:\n for tile in tiles:\n process_tile(tile)\n abar()\n else:\n for tile in tiles:\n process_tile(tile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generation of the overview tiles (higher in the pyramid) by merging the four underlying tiles from the level below
def generate_overview_tiles(self): print 'Generating Overview Tiles:' tilebands = self.dataBandsCount + 1 # Usage of existing tiles: from 4 underlying tiles generate one as overview. tcount = 0 for tz in range(self.tmaxz - 1, self.tminz - 1, -1): (tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz] tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy)) ti = 0 # querysize = tilesize * 2 for tz in range(self.tmaxz - 1, self.tminz - 1, -1): (tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz] yrange = range(tmaxy, tminy - 1, -1) if self.options.leaflet: yrange = range(tminy, tmaxy + 1) for ty in yrange: for tx in range(tminx, tmaxx + 1): if self.stopped: break ti += 1 tilefilename = os.path.join(self.output, str(tz), str(tx), '%s.%s' % (2**tz-1-ty, self.tileext)) if self.options.verbose: print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )" if self.options.resume \ and os.path.exists(tilefilename): if self.options.verbose: print 'Tile generation skiped because of --resume' else: self.progressbar(ti / float(tcount)) continue # Create directories for the tile if not os.path.exists(os.path.dirname(tilefilename)): os.makedirs(os.path.dirname(tilefilename)) dsquery = self.mem_drv.Create('', 2 * self.tilesize, 2 * self.tilesize, tilebands) # TODO: fill the null value # for i in range(1, tilebands+1): # dsquery.GetRasterBand(1).Fill(tilenodata) dstile = self.mem_drv.Create('', self.tilesize, self.tilesize, tilebands) # TODO: Implement more clever walking on the tiles with cache functionality # probably walk should start with reading of four tiles from top left corner # Hilbert curve children = [] # Read the tiles and write them to query window for y in range(2 * ty, 2 * ty + 2): for x in range(2 * tx, 2 * tx + 2): (minx, miny, maxx, maxy) = self.tminmax[tz + 1] if x >= minx and x <= maxx and y >= miny \ and y <= maxy: # print(os.path.join(self.output,str(tz + 1), str(x), '%s.%s'% (2**(tz+1)-1-y, self.tileext))) dsquerytile = \ gdal.Open(os.path.join(self.output, str(tz + 1), str(x), '%s.%s' % (2**(tz+1)-1-y, self.tileext)), gdal.GA_ReadOnly) if self.options.leaflet: if ty: tileposy = y % (2 * ty) \ * self.tilesize elif ty == 0 and y == 1: tileposy = self.tilesize else: tileposy = 0 else: if ty == 0 and y == 1 or ty != 0 \ and y % (2 * ty) != 0: tileposy = 0 else: tileposy = self.tilesize if tx: tileposx = x % (2 * tx) \ * self.tilesize elif tx == 0 and x == 1: tileposx = self.tilesize else: tileposx = 0 dsquery.WriteRaster( tileposx, tileposy, self.tilesize, self.tilesize, dsquerytile.ReadRaster(0, 0, self.tilesize, self.tilesize), band_list=list(range(1, tilebands + 1)), ) children.append([x, y, tz + 1]) self.scale_query_to_tile(dsquery, dstile, tilefilename) # Write a copy of tile to png/jpg if self.options.resampling != 'antialias': # Write a copy of tile to png/jpg self.out_drv.CreateCopy(tilefilename, dstile, strict=0) if self.options.verbose: print ( '\tbuild from zoom', tz + 1, ' tiles:', (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty), (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1), ) # Create a KML file for this tile. if self.kml: f = open(os.path.join(self.output, '%d/%d/%d.kml' % (tz, tx, ty)), 'w') f.write(self.generate_kml(tx, ty, tz, children)) f.close() if not self.options.verbose: self.progressbar(ti / float(tcount))
[ "def _render_tiles(self, tiles, wslice, hslice):\n\n for row in tiles:\n for atile in row:\n basex = wslice*atile.x\n basey = hslice*atile.y\n if atile.visited is True:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=atile.bg)\n else:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=\"black\")", "def init_tiles(self):\n for simple in [Game.TILE_SIMPLE_DOT, Game.TILE_SIMPLE_BAMBOO, Game.TILE_SIMPLE_CHAR]:\n for value in range(Game.SIZE_SIMPLE):\n self.tiles += [(simple, value) for i in range(4)]\n\n for value in ['east', 'west', 'north', 'south']:\n self.tiles += [(Game.TILE_HONOR_WIND, value) for i in range(4)]\n self.tiles += [(Game.TILE_BONUS_FLOWER, value)]\n self.tiles += [(Game.TILE_BONUS_SEASON, value)]\n\n for value in ['red', 'green', 'white']:\n self.tiles += [(Game.TILE_HONOR_DRAGON, value) for i in range(4)]\n\n random.shuffle(self.tiles)\n return", "def generate_base_tiles(self):\n\n print 'Generating Base Tiles:'\n\n if self.options.verbose:\n\n # mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n # px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n # print \"Pixel coordinates:\", px, py, (mx, my)\n\n print ''\n print 'Tiles generated from the max zoom level:'\n print '----------------------------------------'\n print ''\n\n # Set the bounds\n\n (tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz]\n\n # Just the center tile\n # tminx = tminx+ (tmaxx - tminx)/2\n # tminy = tminy+ (tmaxy - tminy)/2\n # tmaxx = tminx\n # tmaxy = tminy\n\n ds = self.out_ds\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print ('dataBandsCount: ', self.dataBandsCount)\n print ('tilebands: ', tilebands)\n\n # print tminx, tminy, tmaxx, tmaxy\n\n tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))\n\n # print tcount\n\n ti = 0\n\n tz = self.tmaxz\n yrange = range(tmaxy, tminy - 1, -1)\n if self.options.leaflet:\n yrange = range(tminy, tmaxy + 1)\n\n for ty in yrange:\n for tx in range(tminx, tmaxx + 1):\n\n if self.stopped:\n break\n ti += 1\n tilefilename = os.path.join(self.output, str(tz),\n str(tx), '%s.%s' % ((2**tz-1-ty), self.tileext))\n if self.options.verbose:\n print (ti, '/', tcount, tilefilename) # , \"( TileMapService: z / x / y )\"\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print 'Tile generation skiped because of --resume'\n else:\n self.progressbar(ti / float(tcount))\n continue\n\n # Create directories for the tile\n\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n\n # Tile bounds in EPSG:900913\n\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # print \"\\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif\" % ( b[0], b[1], b[2], b[3], \"tiles.vrt\", tz, tx, ty)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n (rb, wb) = self.geo_query(ds, b[0], b[3], b[2],\n b[1])\n nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print ('\\tNative Extent (querysize',\n nativesize, '): ', rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n\n (rb, wb) = self.geo_query(\n ds,\n b[0],\n b[3],\n b[2],\n 
b[1],\n querysize=querysize,\n )\n\n (rx, ry, rxsize, rysize) = rb\n (wx, wy, wxsize, wysize) = wb\n else:\n\n # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = tx * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n if self.options.leaflet:\n ry = ty * tsize\n else:\n ry = ysize - ty * tsize - rysize\n\n (wx, wy) = (0, 0)\n (wxsize, wysize) = (int(rxsize / float(tsize)\n * self.tilesize), int(rysize / float(tsize)\n * self.tilesize))\n if not self.options.leaflet:\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n if self.options.verbose:\n print ('\\tReadRaster Extent: ', (rx, ry, rxsize,\n rysize), (wx, wy, wxsize, wysize))\n\n # Query is in 'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n\n # Tile dataset in memory\n\n dstile = self.mem_drv.Create('', self.tilesize,\n self.tilesize, tilebands)\n data = ds.ReadRaster(\n rx,\n ry,\n rxsize,\n rysize,\n wxsize,\n wysize,\n band_list=list(range(1, self.dataBandsCount + 1)),\n )\n alpha = self.alphaband.ReadRaster(\n rx,\n ry,\n rxsize,\n rysize,\n wxsize,\n wysize,\n )\n\n if self.tilesize == querysize:\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n data,\n band_list=list(range(1, self.dataBandsCount\n + 1)),\n )\n dstile.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n alpha,\n band_list=[tilebands],\n )\n else:\n \n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n\n dsquery = self.mem_drv.Create('', querysize,\n querysize, tilebands)\n\n # TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported)\n # for i in range(1, tilebands+1):\n # dsquery.GetRasterBand(1).Fill(tilenodata)\n\n dsquery.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n data,\n band_list=list(range(1, self.dataBandsCount\n + 1)),\n )\n dsquery.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n alpha,\n band_list=[tilebands],\n )\n\n # print('-'+tilefilename+'-')\n self.scale_query_to_tile(dsquery, dstile,\n tilefilename)\n del dsquery\n\n del data\n\n if self.options.resampling != 'antialias':\n\n # Write a copy of tile to png/jpg\n\n self.out_drv.CreateCopy(tilefilename, dstile,\n strict=0)\n\n del dstile\n\n # Create a KML file for this tile.\n\n if self.kml:\n kmlfilename = os.path.join(self.output, str(tz),\n str(tx), '%d.kml' % ty)\n if not self.options.resume \\\n or not os.path.exists(kmlfilename):\n f = open(kmlfilename, 'w')\n f.write(self.generate_kml(tx, ty, tz))\n f.close()\n\n if not self.options.verbose:\n self.progressbar(ti / float(tcount))", "def make_tiles(self):\n num_tiles = self._puzzle_height * self._puzzle_width\n #subsurface is a ract(left, top, width, height\n \n for idx in xrange(num_tiles):\n self._tiles.append(self._tiles_sprite.subsurface(\n (idx * TILE_SIZE, 0, TILE_SIZE, TILE_SIZE)))", "def 
_create_tiles(self):\r\n for column in range(self.columns):\r\n for row in range(self.rows):\r\n tile_name = str(column) + ',' + str(row)\r\n self.tiles[tile_name] = Tile(column=column, row=row)", "def __initTiles(self):\n for m in range(self.amountVertical):\n for n in range(self.amountHorizontal):\n tile = self.themeFactory.createThemeElement(self.mapfile[m][n])\n tile.setCoordinates(m, n)\n tile.number = (m * self.amountHorizontal) + n\n self.tiles.append(tile)\n self.sprites.add(tile)", "def generate_tiles(self, state):\n rows = state.map.split()[::-1] # Y-axis is positive, so start at the bottom\n height = len(rows)\n width = len(rows[0])\n self.tiles = [[None for _ in range(height)] for _ in range(width)]\n for y, row in enumerate(rows):\n for x, char in enumerate(row):\n self.tiles[x][y] = Tile(char, x, y)", "def make_tiles(self, x_size, y_size, x_step, y_step, output_path, verbose=True):\n\n fig, ax = self.make_figure()\n x = self.doc.header['$EXTMIN'][0]\n y = self.doc.header['$EXTMIN'][1]\n\n # Slide until the bottom edge of the window is above the top of\n # the elements in the doc\n while y < self.doc.header['$EXTMAX'][1]:\n\n # Get window into document\n xlim = (x, x + x_size)\n ylim = (y, y + y_size)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # to check if image is empty\n # import cv2\n # im = cv2.imread('2.jpg')\n # if im is None:\n # Print(\"Image is empty\")\n\n # to get percentage of empty space in image\n # from PIL import Image\n # image = Image.open(\"pepper.png\")\n # bg = image.getpixel((0,0))\n # width, height = image.size\n # bg_count = next(n for n,c in image.getcolors(width*height) if c==bg)\n # img_count = width*height - bg_count\n # img_percent = img_count*100.0/width/height\n\n filename = \"%s_x_%s_%s_y_%s_%s.png\" % (\"tile_\", xlim[0], xlim[1], ylim[0], ylim[1])\n if verbose:\n print('Writing: %s' % filename)\n fig.savefig(os.path.join(output_path, filename), dpi=self.dpi)\n\n # Step\n x += x_step\n if x > self.doc.header['$EXTMAX'][0]:\n x = self.doc.header['$EXTMIN'][0]\n y += y_step", "def buildTiles(self, items, attributes):\n matrix = {}\n\n def addItem(tx, ty, px, py, **itemparams):\n if '{}|{}'.format(tx, ty) not in matrix:\n matrix['{}|{}'.format(tx, ty)] = []\n matrix['{}|{}'.format(tx, ty)].append([px, py, itemparams])\n\n params = {}\n\n for zoom in self.ZOOMLEVELS:\n\n if not os.path.exists('{}/{}'.format(self.DESTPATH, zoom)): # create directory\n os.makedirs('{}/{}'.format(self.DESTPATH, zoom))\n\n for item in items:\n _last = None\n for node in item.parameters['nodes']:\n coord = deg2num(float(node['lat']), float(node['lon']), zoom)\n\n if _last is not None:\n\n if _last[0] <= coord[0]: # eval tiles in x direction\n dx = range(_last[0], coord[0] + 1)\n else:\n dx = range(_last[0], coord[0] - 1, -1)\n\n if _last[1] <= coord[1]: # eval tiles in y direction\n dy = range(_last[1], coord[1] + 1)\n else:\n dy = range(_last[1], coord[1] - 1, -1)\n\n for x in dx: # loop through tiles\n for y in dy:\n lstart = (_last[2] + (_last[0] - x) * 256, _last[3] + (_last[1] - y) * 256) # start point\n lend = (coord[2] + (coord[0] - x) * 256, coord[3] + (coord[1] - y) * 256) # end point\n\n if os.path.exists('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y)):\n img = Image.open('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y))\n else:\n img = Image.new('RGBA', (256, 256))\n draw = ImageDraw.Draw(img)\n\n draw.line([lstart, lend], fill=self.LINECOLOR, width=(zoom - 15) * 2) # draw line\n img.save('{}/{}/{}-{}.png'.format(self.DESTPATH, zoom, x, y))\n\n 
_last = coord", "def draw_tile_backgrounds(self, tiles):\n\n def process_tile(tile):\n h = tile.height\n h_index = (h - self.parent.min_height) / (self.parent.max_height - self.parent.min_height)\n\n rgb_rand_1 = random.randint(0, self.ocean_noise)\n\n height_rgb = [0, 0, 0]\n height_rgb[0] = self.height_rgb_low[0] + h_index * (self.height_rgb_high[0] - self.height_rgb_low[0])\n height_rgb[1] = self.height_rgb_low[1] + h_index * (self.height_rgb_high[1] - self.height_rgb_low[1])\n height_rgb[2] = self.height_rgb_low[2] + h_index * (self.height_rgb_high[2] - self.height_rgb_low[2])\n\n water_rgb = (rgb_rand_1, rgb_rand_1, 255)\n if self.screen_mode == \"dark\":\n water_rgb = (rgb_rand_1 // 2, rgb_rand_1 // 2, 150)\n if self.screen_mode == \"martin\":\n water_rgb = (195 + rgb_rand_1 * 0.5, 234 + rgb_rand_1 * 0.5, 251)\n\n fillColors = [\n height_rgb, # Ground\n height_rgb, # Rail\n self.road_tile_rgb, # Road\n height_rgb, # Town building\n height_rgb, # Trees\n self.station_rgb, # Stations\n water_rgb, # Water\n height_rgb, # Void\n self.industry_rgb, # Industries\n self.torb_rgb, # Tunnel/bridge\n height_rgb, # Objects\n ]\n fillColor = fillColors[tile.kind % len(fillColors)]\n if tile.kind == 1:\n rail = tile.occupant\n if rail.is_depot:\n fillColor = self.rail_depot_rgb\n\n if tile.kind == 5:\n station = tile.occupant\n if station.station_type == 0:\n fillColor = self.rail_station_rgb\n if station.station_type == 1:\n fillColor = self.airport_rgb\n if station.station_type == 2:\n fillColor = self.bus_station_rgb\n if station.station_type == 3:\n fillColor = self.truck_station_rgb\n if station.station_type == 4:\n fillColor = self.heliport_rgb\n if station.station_type == 5:\n fillColor = self.seaport_rgb\n\n self.draw_square(tile, fillColor)\n if tile.kind == 1:\n rail = tile.occupant\n if not rail.is_depot:\n self.draw_rail_background(tile)\n\n if self.parent.show_progress_bar:\n with alive_bar(len(tiles)) as abar:\n for tile in tiles:\n process_tile(tile)\n abar()\n else:\n for tile in tiles:\n process_tile(tile)", "def initialize_tiles(self):\n for x in range(0, self.width, self.tile_size):\n for y in range(0, self.height, self.tile_size):\n self.group_tiles.add(Tile(x, y, self.tile_size))", "def init_tiles(self):\n\t\tfor y in range(self.height):\n\t\t\tself.tiles.append([])\n\t\t\tfor x in range(self.width):\n\t\t\t\tnext_tile = Tile(self, x, y) #TODO: change if tiles get args\n\t\t\t\tself.tiles[y].append(next_tile)\n\t\t\t\tnext_tile.update()", "def new_tile(self):\n # replace with your code (Phase 3)\n\n # Bonus: Check if board is full and do not generate new tile\n\n # Generate a random number up to 1\n\n # Assign new tile depending on generated number\n\n # Place new tile on randomly selected empty square from board\n pass", "def place_initial_tile(self):\n self.create_list_tiles()\n add_dist = True\n x_tile = (self.FIRST_TILE)\n y_tile = (self.FIRST_TILE)\n color = self.WHITE\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES-1][self.HALF_SQUARES-1] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n y_tile = (self.FIRST_TILE)\n color = self.BLACK\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES-1][self.HALF_SQUARES] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE)\n y_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n color = self.BLACK\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n 
self.list_tiles[self.HALF_SQUARES][self.HALF_SQUARES-1] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n y_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n color = self.WHITE\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES][self.HALF_SQUARES] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)", "def test_tiler_make_tiles(create_data):\n\n data = Tiler.make_tiles(\n image_path=create_data['tiffile'],\n link_base=create_data['out_path'],\n output_folder=create_data['out_path'],\n zoom=[7, 8],\n quiet=False,\n nodata=[0],\n # convert=True\n )\n\n assert(os.path.isfile(create_data['tiffile']))\n assert(len(data) == 2)\n assert(data[0] == create_data['out_path_check'])\n assert(os.path.exists(data[0]))\n assert(os.path.isfile(data[1]))\n\n zoom_7 = os.path.join(data[0], '7')\n zoom_8 = os.path.join(data[0], '8')\n zoom_9 = os.path.join(data[0], '9')\n\n assert(os.path.exists(zoom_7))\n assert(os.path.exists(zoom_8))\n assert(not os.path.exists(zoom_9))", "def _prep_tiles(self):\r\n # todo: write this. expected output is a flat iterable.\r\n # todo: explore turning flatten() into generator\r\n\r\n if self._bounds and not self._tiles:\r\n # build tile list from bounds\r\n self._zoom = self._detail + Pin.find_span_zoom(self._bounds)\r\n self._tiles = Tile.from_pins(self._bounds, self._zoom) # get the tiles covering the span\r\n Tile.new_tile_q.join() # wait for tiles to arrive\r\n\r\n if self._tiles and not self._bounds:\r\n sw_pin = Pin.from_tile_coord(np.min(self._X), np.max(self._Y) + 1, self._zoom)\r\n ne_pin = Pin.from_tile_coord(np.max(self._X) + 1, np.min(self._Y), self._zoom)\r\n self._bounds = sw_pin, ne_pin\r\n\r\n assert all(isinstance(t, Tile) for t in self._tiles), f'{self._tiles}' # all objects must be tiles\r\n self._X, self._Y, zooms = np.asarray(list(self._tiles)).T # asarray won't work on sets. 
ugh.\r\n assert all(zooms == zooms[0]) # all zooms must be the same\r\n self._zoom = zooms[0]", "def create_tiles(x_tiles, y_tiles):\n tiles = []\n total = x_tiles * y_tiles\n counter = 0\n\n print(\"create_tiles from '{}' -> '{}'\".format(0, total))\n for j in range(x_tiles):\n for i in range(y_tiles):\n tiles.append(Tile(counter, i, j))\n counter += 1\n\n return tiles", "def generate_tiles(region, delete_used_dir = True):\n directory_structure_for_region(region)\n for png in tqdm(listdir(TILE_PICTURE_LOCATIONS + region + ORIGINAL)):\n #change to include negative numbers\n match = search(r'\\d+', png)\n year = match.group()\n mask_images(region, year + \".png\") \n make_transparent_png(region, year + \".png\")\n geotiff_create(region, year + \".png\")\n create_raster_tiles(region, year + \".tif\", year)\n if delete_used_dir:\n delete_directory_contents(region, MASKED)\n delete_directory_contents(region, TRANSPARENT_PNG)\n delete_directory_contents(region, GEOTIFF)\n delete_directory_contents(region, TRANSPARENT_PNG)\n delete_directory_contents(region, INTERTIFF)\n delete_directory_contents(region, TRANSLATED_PNG)", "def generate_openlayers(self):\n\n args = {}\n args['title'] = self.options.title\n args['bingkey'] = self.options.bingkey\n (args['south'], args['west'], args['north'], args['east']) = \\\n self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url\n args['copyright'] = self.options.copyright\n if self.options.tmscompatible:\n args['tmsoffset'] = '-1'\n else:\n args['tmsoffset'] = ''\n if self.options.profile == 'raster':\n args['rasterzoomlevels'] = self.tmaxz + 1\n args['rastermaxresolution'] = 2 ** self.nativezoom \\\n * self.out_gt[1]\n\n s = \\\n \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n .olImageLoadError { display: none; }\n .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }\n </style>\"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>\"\"\" \\\n % args\n\n s += \\\n \"\"\"\n <script src=\"http://www.openlayers.org/api/2.12/OpenLayers.js\"></script>\n <script>\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n var emptyTileURL = \"http://www.maptiler.org/img/none.png\";\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n\n function init(){\"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:900913\",\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n numZoomLevels: 20\n };\n map = new 
OpenLayers.Map(options);\n\n // Create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n {\n type: google.maps.MapTypeId.ROADMAP,\n sphericalMercator: true\n });\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {\n type: google.maps.MapTypeId.SATELLITE,\n sphericalMercator: true\n });\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {\n type: google.maps.MapTypeId.HYBRID,\n sphericalMercator: true\n });\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {\n type: google.maps.MapTypeId.TERRAIN,\n sphericalMercator: true\n });\n\n // Create Bing layers\n var broad = new OpenLayers.Layer.Bing({\n name: \"Bing Roads\",\n key: \"%(bingkey)s\",\n type: \"Road\",\n sphericalMercator: true\n });\n var baer = new OpenLayers.Layer.Bing({\n name: \"Bing Aerial\",\n key: \"%(bingkey)s\",\n type: \"Aerial\",\n sphericalMercator: true\n });\n var bhyb = new OpenLayers.Layer.Bing({\n name: \"Bing Hybrid\",\n key: \"%(bingkey)s\",\n type: \"AerialWithLabels\",\n sphericalMercator: true\n });\n\n // Create OSM layer\n var osm = new OpenLayers.Layer.OSM(\"OpenStreetMap\");\n\n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([gmap, gsat, ghyb, gter,\n broad, baer, bhyb,\n osm, tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));\n \"\"\" \\\n % args\n elif self.options.profile == 'geodetic':\n\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:4326\"\n };\n map = new OpenLayers.Map(options);\n\n var wms = new OpenLayers.Layer.WMS(\"VMap0\",\n \"http://tilecache.osgeo.org/wms-c/Basic.py?\",\n {\n layers: 'basic',\n format: 'image/png'\n }\n );\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([wms,tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds);\n \"\"\" \\\n % args\n elif self.options.profile == 'raster':\n\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map(options);\n\n var layer = new OpenLayers.Layer.TMS(\"TMS Layer\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n getURL: getURL\n });\n\n map.addLayer(layer);\n map.zoomToExtent(mapBounds);\n \"\"\" \\\n % args\n\n s += \\\n \"\"\"\n map.addControls([new OpenLayers.Control.PanZoomBar(),\n new OpenLayers.Control.Navigation(),\n new OpenLayers.Control.MousePosition(),\n new OpenLayers.Control.ArgParser(),\n new OpenLayers.Control.Attribution()]);\n }\n \"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = 
this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {\n z+=1;\n }\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n elif self.options.profile == 'geodetic':\n\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom()%(tmsoffset)s;\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n elif self.options.profile == 'raster':\n\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n\n s += \\\n \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"http://www.maptiler.org/\">MapTiler</a>/<a 
href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" \\\n % args\n\n return s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a given dataset and query in cartographic coordinates, returns parameters for ReadRaster() in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the extent is returned in the native resolution of dataset ds.
def geo_query(
    self,
    ds,
    ulx,
    uly,
    lrx,
    lry,
    querysize=0,
    ):

    geotran = ds.GetGeoTransform()
    rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
    ry = int((uly - geotran[3]) / geotran[5] + 0.001)
    rxsize = int((lrx - ulx) / geotran[1] + 0.5)
    rysize = int((lry - uly) / geotran[5] + 0.5)

    if not querysize:
        (wxsize, wysize) = (rxsize, rysize)
    else:
        (wxsize, wysize) = (querysize, querysize)

    # Coordinates should not go out of the bounds of the raster
    wx = 0
    if rx < 0:
        rxshift = abs(rx)
        wx = int(wxsize * (float(rxshift) / rxsize))
        wxsize = wxsize - wx
        rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
        rx = 0
    if rx + rxsize > ds.RasterXSize:
        wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
        rxsize = ds.RasterXSize - rx

    wy = 0
    if ry < 0:
        ryshift = abs(ry)
        wy = int(wysize * (float(ryshift) / rysize))
        wysize = wysize - wy
        rysize = rysize - int(rysize * (float(ryshift) / rysize))
        ry = 0
    if ry + rysize > ds.RasterYSize:
        wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
        rysize = ds.RasterYSize - ry

    return ((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
[ "def _get_extent(inp):\n d = inp.dimensions\n return [0, d[0]-1, 0, d[1]-1, 0, d[2]-1]", "def read_gdal_coordinates(dataset, mode='centers', z=True):\n coordinates_pixel = pixel_coordinates(dataset.RasterXSize,\n dataset.RasterYSize, mode)\n geotransform = dataset.GetGeoTransform()\n if z:\n coordinates = pixel_to_map3d(geotransform, coordinates_pixel)\n else:\n coordinates = pixel_to_map(geotransform, coordinates_pixel)\n return (coordinates)", "def query_for_image_dims(\n query_engine: QueryEngineType,\n focal_plane_image_series_id: int\n) -> Tuple[float, float]:\n\n query = f\"\"\"\n select \n im.height as height,\n im.width as width\n from specimens sp\n join specimens spp on spp.id = sp.parent_id\n join image_series imser on imser.specimen_id = spp.id\n join sub_images si on si.image_series_id = imser.id\n join images im on im.id = si.image_id\n join treatments tm on tm.id = im.treatment_id\n where\n imser.id = {focal_plane_image_series_id}\n and tm.name = 'Biocytin'\n \"\"\"\n result = query_engine(query)\n return result[0][\"width\"], result[0][\"height\"]", "def getRasterExtent(raster_in):\n if not os.path.isfile(raster_in):\n return []\n raster = gdal.Open(raster_in, GA_ReadOnly)\n if raster is None:\n return []\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0]\n originY = geotransform[3]\n spacingX = geotransform[1]\n spacingY = geotransform[5]\n r, c = raster.RasterYSize, raster.RasterXSize\n\n minX = originX\n maxY = originY\n maxX = minX + c * spacingX\n minY = maxY + r * spacingY\n return [minX, maxX, minY, maxY]", "def GetTiledSizeAndOrigin(self, *int, **kwargs):\n ...", "def scale_query_to_tile(\n self,\n dsquery,\n dstile,\n tilefilename='',\n ):\n\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n tilebands = dstile.RasterCount\n\n if self.options.resampling == 'average':\n\n # Function: gdal.RegenerateOverview()\n\n for i in range(1, tilebands + 1):\n\n # Black border around NODATA\n # if i != 4:\n # dsquery.GetRasterBand(i).SetNoDataValue(0)\n\n res = gdal.RegenerateOverview(dsquery.GetRasterBand(i),\n dstile.GetRasterBand(i), 'average')\n if res != 0:\n self.error('RegenerateOverview() failed on %s, error %d'\n % (tilefilename, res))\n elif self.options.resampling == 'antialias':\n\n # Scaling by PIL (Python Imaging Library) - improved Lanczos\n\n array = numpy.zeros((querysize, querysize, tilebands),\n numpy.uint8)\n for i in range(tilebands):\n array[:, :, i] = \\\n gdalarray.BandReadAsArray(dsquery.GetRasterBand(i\n + 1), 0, 0, querysize, querysize)\n im = Image.fromarray(array, 'RGBA') # Always four bands\n im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)\n if os.path.exists(tilefilename):\n im0 = Image.open(tilefilename)\n im1 = Image.composite(im1, im0, im1)\n im1.save(tilefilename, self.tiledriver)\n else:\n\n # Other algorithms are implemented by gdal.ReprojectImage().\n\n dsquery.SetGeoTransform((\n 0.0,\n tilesize / float(querysize),\n 0.0,\n 0.0,\n 0.0,\n tilesize / float(querysize),\n ))\n dstile.SetGeoTransform((\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0,\n ))\n\n res = gdal.ReprojectImage(dsquery, dstile, None, None,\n self.resampling)\n if res != 0:\n self.error('ReprojectImage() failed on %s, error %d'\n % (tilefilename, res))", "def calculate_extent(shape, resolution, offset=(0, 0), ax=None, dimension=None, **kwargs):\n # Calculate a specific extent if a resolution is specified\n if resolution is not None:\n\n # Validate relevant parameters\n for n, v in (('shape', shape), ('resolution', resolution), 
('offset', offset)):\n if not isinstance(v, tuple) or len(v) != 2:\n raise TypeError(f'`{n}` must be a tuple of length 2.')\n\n # Calculate extent values, and extract units\n ypx, xpx = shape\n l, r, x_unit = calculate_axis_extent(resolution[0], xpx, offset=offset[0], **kwargs)\n b, t, y_unit = calculate_axis_extent(resolution[1], ypx, offset=offset[1], **kwargs)\n\n # Optionally set the axis labels\n if ax is not None:\n\n # Extract the dimension name\n if isinstance(dimension, (tuple, list)): # different value for each dimension\n if len(dimension) != 2:\n raise TypeError('`dimension` must be a tuple or list of length 2.')\n x_dim = str(dimension[0])\n y_dim = str(dimension[1])\n elif dimension is None: # default values\n x_dim, y_dim = 'x-axis', 'y-axis'\n elif isinstance(dimension, str): # single value for both dimensions\n x_dim = y_dim = str(dimension)\n else:\n raise TypeError('`dimension` must be a tuple or list of length 2.')\n ax.set_xlabel(f'{x_dim} ({x_unit})')\n ax.set_ylabel(f'{y_dim} ({y_unit})')\n\n return l, r, b, t # extent\n\n return None # default extent", "def _add_lat_lon_dimensions(self, dataset, height, width):\n if self.crs.is_geographic:\n # Latitude and Longitude variables\n dataset.createDimension('lon', width)\n dataset.createDimension('lat', height)\n\n lats = dataset.createVariable('lat', 'f8', dimensions=('lat'))\n lons = dataset.createVariable('lon', 'f8', dimensions=('lon'))\n\n lats.standard_name = 'latitude'\n lats.long_name = 'latitude'\n lats.units = 'degrees_north'\n lats._CoordinateAxisType = \"Lat\" # noqa W0212\n\n lons.standard_name = 'longitude'\n lons.long_name = 'longitude'\n lons.units = 'degrees_east'\n lons._CoordinateAxisType = \"Lon\" # noqa W0212\n\n dimensions = ('lat', 'lon')\n else:\n dataset.createDimension('x', width)\n dataset.createDimension('y', height)\n\n lats = dataset.createVariable('y', 'f8', dimensions=('y'))\n lons = dataset.createVariable('x', 'f8', dimensions=('x'))\n\n lats.standard_name = 'projection_y_coordinate'\n lats.long_name = 'Northing'\n # TODO: How do we know if it's meters or something else?\n # lats.units = 'meters'\n lats._CoordinateAxisType = \"GeoY\"\n\n lons.standard_name = 'projection_x_coordinate'\n lons.long_name = \"Easting\"\n lons._CoordinateAxisType = \"GeoX\"\n\n dimensions = 'y', 'x'\n\n crs = dataset.createVariable('spatial_ref', 'i4')\n crs.spatial_ref = self.crs.wkt\n\n # Transform the cell indices to lat/lon based on the image crs\n # and transform\n x_coords, _ = rasterio.transform.xy(self.transform, np.zeros(width),\n np.arange(width))\n _, y_coords = rasterio.transform.xy(self.transform, np.arange(height),\n np.zeros(height))\n\n lons[:] = x_coords\n lats[:] = y_coords\n\n return dimensions", "def test_read_getextent(tmpdir, driver, testdata_generator):\n schema, crs, records1, records2, _ = testdata_generator(\n driver, range(0, 10), range(10, 20)\n )\n path = str(tmpdir.join(get_temp_filename(driver)))\n positions = {int(r['properties']['position']) for r in records1 + records2}\n\n with fiona.open(\n path,\n \"w\",\n driver=driver,\n crs=crs,\n schema=schema,\n ) as c:\n c.writerecords(records1)\n c.writerecords(records2)\n\n with fiona.open(path) as c:\n data = set()\n for _ in range(len(records1)):\n f = next(c)\n data.add(int(f['properties']['position']))\n\n # Call to OGR_L_GetExtent\n try:\n c.bounds\n except DriverError:\n pass\n\n for _ in range(len(records1)):\n f = next(c)\n data.add(int(f['properties']['position']))\n assert len(positions) == len(data)\n for p in positions:\n 
assert p in data", "def get_image_pixel_size(): \n function = LegacyFunctionSpecification() \n function.addParameter('nx', dtype='i', direction=function.OUT)\n function.addParameter('ny', dtype='i', direction=function.OUT)\n function.result_type = 'i'\n return function", "def read(fnm, in_memory, ibands=ALL, bandclass=CompressedBand):\n hdr = dict()\n dataset = osgeo.gdal.Open(fnm, gc.GA_ReadOnly)\n\n if ibands == ALL:\n ibands = list(range(1, dataset.RasterCount+1))\n elif not hasattr(ibands, \"__iter__\"):\n ibands = [ibands]\n\n try:\n hdr[\"nx\"] = dataset.RasterXSize\n hdr[\"ny\"] = dataset.RasterYSize\n\n transform = dataset.GetGeoTransform()\n if transform is not None:\n hdr[\"dx\"] = transform[1]\n hdr[\"dy\"] = transform[5]\n hdr[\"xulcorner\"] = transform[0]\n hdr[\"yulcorner\"] = transform[3]\n hdr[\"sx\"] = transform[2]\n hdr[\"sy\"] = transform[4]\n else:\n raise AttributeError(\"No GeoTransform in geotiff file\")\n\n sr = SRS_from_WKT(dataset.GetProjectionRef())\n if sr is not None:\n hdr[\"srs\"] = {\"proj4\": sr.ExportToProj4(),\n \"semimajor\": sr.GetSemiMajor(),\n \"flattening\": sr.GetInvFlattening(),\n \"name\": sr.GetAttrValue('PROJCS')}\n else:\n hdr[\"srs\"] = {\"proj4\": \"\",\n \"semimajor\": 6370997.0,\n \"flattening\": 1.0 / 298.257223563,\n \"name\": \"NA\"}\n\n max_dtype = 0\n rasterbands = [dataset.GetRasterBand(i) for i in ibands]\n hdr[\"nodata\"] = rasterbands[0].GetNoDataValue()\n nx = rasterbands[0].XSize\n ny = rasterbands[0].YSize\n if rasterbands[0].DataType > max_dtype:\n max_dtype = rasterbands[0].DataType\n\n if in_memory:\n dtype = numpy_dtype(rasterbands[0].DataType)\n bands = [bandclass((ny, nx), dtype) for _ in ibands]\n for i, rb in enumerate(rasterbands):\n _arr = rb.ReadAsArray(buf_obj=np.empty([ny, nx], dtype=dtype))\n if _arr is None:\n raise IOError(\"error reading GDAL band {}\".format(i+1))\n bands[i].setblock(0, 0, _arr.squeeze()[::-1])\n else:\n bands = [GdalFileBand(rb, dataset) for rb in rasterbands]\n\n finally:\n if in_memory:\n dataset = None\n return bands, hdr", "def get_spatial_ext_chunk_sizes(ds_or_path: Union[xr.Dataset, str]) -> Dict[str, int]:\n if isinstance(ds_or_path, str):\n ds = xr.open_dataset(ds_or_path, decode_times=False)\n else:\n ds = ds_or_path\n lon_name = get_lon_dim_name(ds)\n lat_name = get_lat_dim_name(ds)\n if lon_name and lat_name:\n chunk_sizes = get_ext_chunk_sizes(ds, {lat_name, lon_name})\n else:\n chunk_sizes = None\n if isinstance(ds_or_path, str):\n ds.close()\n return chunk_sizes", "def index_raster(dataset, lat, lon):\n\n lat_idx = (lat - dataset.y) / dataset.height\n lon_idx = (lon - dataset.x) / dataset.width\n try:\n return dataset.data[lat_idx, lon_idx]\n except IndexError:\n return numpy.inf", "def raw_resolution(splitter=False):\n width, height = RESOLUTION\n if splitter:\n fwidth = (width + 15) & ~15\n else:\n fwidth = (width + 31) & ~31\n fheight = (height + 15) & ~15\n return fwidth, fheight", "def get_aoi_dimensions(path_aoi, crs_output):\n\n aoi = gpd.read_file(path_aoi)\n\n aoi = aoi.to_crs(crs={'init': crs_output})\n\n aoi_shape = aoi.geometry.values.tolist()[-1]\n aoi_width = aoi_shape.bounds[2] - aoi_shape.bounds[0]\n aoi_height = aoi_shape.bounds[3] - aoi_shape.bounds[1]\n\n return aoi_shape, aoi_width, aoi_height", "def extent_2d(self):\n cosine = math.cos(math.radians(self.rot))\n sine = math.sin(math.radians(self.rot))\n width = (self.nx - 1) * self.dx\n height = (self.ny - 1) * self.dy\n xy0 = (self.x0, self.y0)\n xy1 = (self.x0 + width * cosine, self.y0 + width * sine)\n xy2 
= (self.x0 + width * cosine - height * sine, self.y0 + width * sine + height * cosine)\n xy3 = (self.x0 - height * sine, self.y0 + height * cosine)\n\n minxy = (min(xy0[0], xy1[0], xy2[0], xy3[0]),\n min(xy0[1], xy1[1], xy2[1], xy3[1]))\n maxxy = (max(xy0[0], xy1[0], xy2[0], xy3[0]),\n max(xy0[1], xy1[1], xy2[1], xy3[1]))\n\n return minxy, maxxy", "def get_raster_metadata(file, con_stats=True):\n raster = gdal.Open(file, gdal.GA_ReadOnly)\n if raster is None:\n return None\n\n # extraemos toda info de proyeccion del raster usando gdal\n srid, proj, extent_capa = _get_raster_proj_info(raster)\n\n # extraemos todos los metadatos del raster usando gdalinfo\n metadata_gdalinfo_json = _run_gdalinfo(file, con_stats)\n\n # esto no es correcto pero va a evitar que explote si faltan metadatos\n extent_capa_4326 = extent_capa\n try:\n extent_capa_4326 = extentConvert(extent_capa, metadata_gdalinfo_json['coordinateSystem']['wkt'], 'EPSG:4326')\n except:\n pass\n\n # print \"Calculated extent: %s\"%(str(extent_capa))\n # extent_capa_4326 = _get_polygon_extent(metadata_gdalinfo_json['wgs84Extent']['coordinates'][0])\n # print \"GDAL Info: proj: %s, srid: %s, extent 4326: %s\"%(\n # metadata_gdalinfo_json['coordinateSystem']['wkt'],\n # str(srid),\n # extentConvert(extent_capa, metadata_gdalinfo_json['coordinateSystem']['wkt'], 'EPSG:4326')\n # )\n # if 'wgs84Extent' in metadata_gdalinfo_json:\n # try:\n # extent_capa_4326 = _get_polygon_extent(metadata_gdalinfo_json['wgs84Extent']['coordinates'][0])\n # except:\n # pass\n\n\n variables_detectadas = {}\n subdatasets = []\n # Segun el formato del raster, determinamos las bandas para armar los mapas 'layer_raster_band' (mapas de variables)\n if raster.GetDriver().ShortName == 'GRIB':\n # en el caso de GRIB nos interesan los elementos en 'bands'\n if 'bands' in metadata_gdalinfo_json:\n wind_u_band = wind_v_band = None\n for banda in metadata_gdalinfo_json['bands']:\n try: # si por algun motivo la banda no tiene la info necesaria la ignoramos\n nro_banda = banda['band']\n grib_element = banda['metadata']['']['GRIB_ELEMENT']\n grib_comment = banda['metadata']['']['GRIB_COMMENT']\n minimo = banda.get('minimum')\n maximo = banda.get('maximum')\n\n if grib_element in ('UGRD', 'UOGRD'):\n wind_u_band = nro_banda\n elif grib_element in ('VGRD', 'VOGRD'):\n wind_v_band = nro_banda\n else:\n variables_detectadas[nro_banda] = {\n 'elemento': grib_element,\n 'descripcion': grib_comment,\n 'rango': (minimo, maximo), # almacenamos el rango de la banda por si lo necesitamos en el DATARANGE\n }\n except:\n pass\n\n if wind_u_band and wind_v_band:\n nro_banda = '{},{}'.format(wind_u_band, wind_v_band)\n variables_detectadas[nro_banda] = {\n 'elemento': 'WIND',\n 'descripcion': 'Wind',\n 'rango': (None, None), # Por el momento no necesitamos rangos para WIND, ya que la simbologia usa uv_length y uv_angle\n }\n wind_u_band = wind_v_band = None\n\n # Ahora analizamos subdatasets\n for subdataset in raster.GetSubDatasets():\n # Ejemplo de un subdataset: ('NETCDF:\"/vagrant/data/SABANCAYA_2018062806_fcst_VAG_18.res.nc\":TOPOGRAPHY', '[41x65] TOPOGRAPHY (32-bit floating-point)')\n # Ejemplo de un subdataset: ('HDF5:\"data/RMA1_0201_01_TH_20180713T164924Z.H5\"://dataset1/data1/data', '[360x526] //dataset1/data1/data (64-bit floating-point)')\n raster_subdataset = gdal.Open(subdataset[0], gdal.GA_ReadOnly)\n srid, proj, extent = _get_raster_proj_info(raster_subdataset)\n subdataset_gdalinfo_json = _run_gdalinfo(subdataset[0], con_stats)\n formato, path, identificador = 
subdataset[0].split(':')\n # Creamos la siguiente estructura para guardar todala info en la IDE, independientemente del formato\n subdatasets.append({\n 'definicion': subdataset, # Ej: ('HDF5:/path/al/archivo:identificador', [alguna descripcion])\n 'identificador': identificador, # Ej: TOPOGRAPHY\n 'gdalinfo': subdataset_gdalinfo_json, # toda la matadata provista por gdalinfo para el subdataset actual\n })\n\n # Y en el caso de netCDF y HDF5, detectamos variables para crear los mapas layer_raster_band, como hacemos con GRIBs\n # Tomamos la primer banda por convencion, ya que mapserver no permite trabajar especificamente una banda dentro de un subdataset (la unidad es el dataset),\n # y en todos los casos que vimos las bandas tienen la misma variable, solo cambia el timestamp\n if 'bands' in subdataset_gdalinfo_json:\n banda0 = subdataset_gdalinfo_json['bands'][0]\n if raster.GetDriver().ShortName == 'netCDF':\n variables_detectadas[identificador] = {\n 'elemento': banda0['metadata'][''].get('NETCDF_VARNAME', ''), # aparentemente todo netCDF tiene este campo y es igual para toda banda del subdataset\n 'descripcion': banda0['metadata'][''].get('description', ''), # algunos netCDF no tienen este campo\n 'rango': (banda0.get('minimum'), banda0.get('maximum')), # en principio este rango no nos interesa porque este formato se renderiza directamente, va por compatibilidad\n 'extent': extent # extent, necesario para cada mapa layer_raster_band\n }\n elif raster.GetDriver().ShortName == 'HDF5':\n variables_detectadas[identificador] = {\n 'elemento': subdataset_gdalinfo_json['metadata'][''].get('what_object', ''), # aparentemente los HDF5 de SMN tienen toooodo duplicado en todas bandas y son todas iguales\n 'descripcion': '', # no encontre nada para cargar...\n 'rango': (None, None), # no nos interesa este campo, solo por compatibilidad\n 'extent': extent # extent, necesario para cada mapa layer_raster_band\n }\n else:\n # Necesitamos info estructural especifica si es otro formato...\n pass\n\n # Lamentablemente hay inconsistencias en algunos archivos analizados con respecto al extent:\n # a veces el de la capa no coincide con el de los subdatasets. 
Tomamos el primero, que se utilizara para renderizar\n if len(subdatasets) > 0:\n extent_capa = variables_detectadas[subdatasets[0]['identificador']]['extent']\n try:\n # los casos analizados NO incluyen informacion de la proyeccion en bandas, solo coordenadas que parecen ser 4326, como no hay garantia intento reproyectarlo\n extent_capa_4326 = extentConvert(extent_capa, 'EPSG:4326', 'EPSG:4326')\n except:\n pass\n\n # construimos la respuesta\n return {\n 'driver_short_name': raster.GetDriver().ShortName,\n 'driver_long_name': raster.GetDriver().LongName,\n 'raster_count': raster.RasterCount,\n 'subdataset_count': len(raster.GetSubDatasets()),\n 'srid': srid, # puede ser None\n 'extent_capa': extent_capa,\n 'extent_capa_4326': extent_capa_4326,\n 'metadata_json': {\n 'gdalinfo': metadata_gdalinfo_json,\n 'variables_detectadas': variables_detectadas,\n 'subdatasets': subdatasets,\n },\n 'proyeccion_proj4': proj.ExportToProj4(),\n 'size_height': raster.RasterYSize,\n 'size_width': raster.RasterXSize,\n }", "def test_read_gdal_raster_stats_with_subdatasets_in_netcdf():\n netcdf_path = get_test_data_file(\"binary/stac_proj_extension/netcdf/multiple_bands.nc\")\n\n raster_metadata: AssetRasterMetadata = read_gdal_raster_metadata(str(netcdf_path))\n\n assert len(raster_metadata.statistics) == 13\n expected_band_names = {\n \"B02\",\n \"B03\",\n \"B04\",\n \"B05\",\n \"B06\",\n \"B07\",\n \"B08\",\n \"B11\",\n \"B12\",\n \"DEM\",\n \"temperature_mean\",\n \"VH\",\n \"VV\",\n }\n assert set(raster_metadata.statistics.keys()) == expected_band_names\n for band_name, band_stats in raster_metadata.statistics.items():\n assert band_stats.minimum is not None\n assert band_stats.maximum is not None\n assert band_stats.mean is not None\n assert band_stats.stddev is not None\n\n # valid_percent can be None though. gdalinfo does not always give us a value for this.\n if band_stats.valid_percent is None:\n logging.warning(f\"band:{band_name} has no value for valid_percent: {band_stats.valid_percent=}\")\n\n assert raster_metadata.projection == {\n \"proj:epsg\": 4326,\n # For some reason gdalinfo reports the bounds in the wrong order here.\n # I think the reason might be that the pixels are south-up instead of\n # north-up, i.e. the scale for the Y-axis of the pixel is negative.\n # Upper Left corner is BELOW Lower Left corner, which is unexpected.\n # gdalinfo reports that CRS is EPSG:4326, X=lon, Y=lat.\n #\n # From gdalinfo:\n # Corner Coordinates:\n # Upper Left ( 0.0, 0.0)\n # Lower Left ( 0.0, 3.0)\n # Upper Right ( 49.0, 0.0)\n # Lower Right ( 49.0, 3.0)\n # Center ( 24.5, 1.5)\n #\n # Would expect this proj:bbox value with the normal order of the corners:\n # \"proj:bbox\": approx([0.0, 0.0, 49.0, 3.O]),\n \"proj:bbox\": approx([0.0, 3.0, 49.0, 0.0]),\n \"proj:shape\": [49, 3],\n }", "def test_get_rasterioDataset_attrs(self):\n self.assertIsNotNone(self.cds.read(1))\n with self.assertRaises(AttributeError):\n _ = self.cds.lat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
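A minimal sketch of how the two windows returned by geo_query above are typically consumed: the source window (rx, ry, rxsize, rysize) is read with ReadRaster() and pasted at the (wx, wy) offset of a query-sized buffer, so border tiles keep their partial data correctly aligned. The input path, tile size, extent values, and the `tiler` object (assumed to be an instance of the class that defines geo_query) are illustrative assumptions, not part of the dataset row.

from osgeo import gdal

ds = gdal.Open('input.tif', gdal.GA_ReadOnly)  # hypothetical source raster
tilesize = 256
querysize = 4 * tilesize

# Hypothetical tile extent in the raster's georeferenced units.
(ulx, uly, lrx, lry) = (10000.0, 20000.0, 12000.0, 18000.0)

(rb, wb) = tiler.geo_query(ds, ulx, uly, lrx, lry, querysize=querysize)
(rx, ry, rxsize, rysize) = rb
(wx, wy, wxsize, wysize) = wb

# Read the clipped source window resampled to (wxsize, wysize) and paste it at
# the (wx, wy) offset of a query-sized in-memory buffer.
mem_drv = gdal.GetDriverByName('MEM')
dsquery = mem_drv.Create('', querysize, querysize, ds.RasterCount,
                         ds.GetRasterBand(1).DataType)
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
                     band_list=list(range(1, ds.RasterCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
                    band_list=list(range(1, ds.RasterCount + 1)))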
Scales down query dataset to the tile dataset
def scale_query_to_tile(
    self,
    dsquery,
    dstile,
    tilefilename='',
    ):

    querysize = dsquery.RasterXSize
    tilesize = dstile.RasterXSize
    tilebands = dstile.RasterCount

    if self.options.resampling == 'average':

        # Function: gdal.RegenerateOverview()

        for i in range(1, tilebands + 1):

            # Black border around NODATA
            # if i != 4:
            # dsquery.GetRasterBand(i).SetNoDataValue(0)

            res = gdal.RegenerateOverview(dsquery.GetRasterBand(i),
                    dstile.GetRasterBand(i), 'average')
            if res != 0:
                self.error('RegenerateOverview() failed on %s, error %d'
                           % (tilefilename, res))
    elif self.options.resampling == 'antialias':

        # Scaling by PIL (Python Imaging Library) - improved Lanczos

        array = numpy.zeros((querysize, querysize, tilebands),
                            numpy.uint8)
        for i in range(tilebands):
            array[:, :, i] = \
                gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
                    0, 0, querysize, querysize)
        im = Image.fromarray(array, 'RGBA')  # Always four bands
        im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
        if os.path.exists(tilefilename):
            im0 = Image.open(tilefilename)
            im1 = Image.composite(im1, im0, im1)
        im1.save(tilefilename, self.tiledriver)
    else:

        # Other algorithms are implemented by gdal.ReprojectImage().

        dsquery.SetGeoTransform((
            0.0,
            tilesize / float(querysize),
            0.0,
            0.0,
            0.0,
            tilesize / float(querysize),
            ))
        dstile.SetGeoTransform((
            0.0,
            1.0,
            0.0,
            0.0,
            0.0,
            1.0,
            ))

        res = gdal.ReprojectImage(dsquery, dstile, None, None,
                                  self.resampling)
        if res != 0:
            self.error('ReprojectImage() failed on %s, error %d'
                       % (tilefilename, res))
[ "def _rescale(self, scale_factor: int) -> xr.DataArray:\n nlat_new = self.nlat / scale_factor\n nlon_new = self.nlon / scale_factor\n assert nlat_new % 1.0 == 0.0, 'The dataset\\'s dimension \\'lat\\' has size {}, '\\\n 'must be fully dividable by the argument \\'scale_factor\\' ({}).'.format(\n self.nlat, scale_factor)\n assert nlon_new % 1.0 == 0.0, 'The dataset\\'s dimension \\'lon\\' has size {}, '\\\n 'must be fully dividable by the argument \\'scale_factor\\' ({}).'.format(\n self.nlon, scale_factor)\n nlat_new = int(nlat_new)\n nlon_new = int(nlon_new)\n # Coordinates needed to build new xr.Dataset.\n coords = [\n xr.IndexVariable('lat_index', np.arange(scale_factor)),\n xr.IndexVariable('lon_index', np.arange(scale_factor)),\n xr.IndexVariable(\n 'var', self.var[self._vardim]) if self._vardim is not None else 0,\n xr.IndexVariable('lat',\n np.linspace(90 - 180 / (nlat_new) / 2, -90 + 180 / (nlat_new) / 2, nlat_new)),\n xr.IndexVariable('lon',\n np.linspace(-180 + (360 / nlon_new / 2), 180 - (360 / nlon_new / 2), nlon_new))\n ]\n if self._vardim is None:\n coords.pop(2)\n\n with self.var as ds:\n # Optionally add time dimension.\n if self.hastime:\n coords = [ds.time] + coords\n rs = (\n self.ntime,\n nlat_new,\n scale_factor,\n nlon_new,\n scale_factor)\n tr = (0, 2, 4, 1, 3)\n # Reshape data.\n data = ds.data.reshape(*rs).transpose(*tr)\n else:\n if self._vardim is not None:\n rs = (\n self._vardim_len,\n nlat_new,\n scale_factor,\n nlon_new,\n scale_factor)\n tr = (2, 4, 0, 1, 3)\n else:\n rs = (\n nlat_new,\n scale_factor,\n nlon_new,\n scale_factor)\n tr = (1, 3, 0, 2)\n # Reshape data.\n data = ds.data.reshape(*rs).transpose(*tr)\n\n # Create DataArray.\n var = xr.DataArray(\n data,\n coords=coords\n )\n # Put attributes back.\n var.attrs = self.var.attrs\n\n return var", "def scale_data(self):\n pass", "def scale_database(self, database):\n database.in_train.set_value(self.scale_raw_data(database.in_train.get_value(borrow=True)))\n database.in_train_valid.set_value(self.scale_raw_data(database.in_train_valid.get_value(borrow=True)))\n database.in_test.set_value(self.scale_raw_data(database.in_test.get_value(borrow=True)))", "def resize(self, new_size):\n if len(self.tile_data)>new_size:\n self.tile_data = self.tile_data[:new_size]\n else:\n for _ in range(len(self.tile_data),new_size):\n self.tile_data.append(Tile())", "def rescaled_image():", "def DownSize(self,downsize):\n \n # if 1 of dim sizes is zero then there's no data, so return\n if np.prod(np.array(self.DataShape))==0:\n return\n \n # make list if int downsize provided by user\n if isinstance(downsize,int):\n downsize = [downsize,downsize]\n # use skimage.transform: \n self.data = downscale_local_mean(self.data,(1,1,1,1,1,*downsize))\n # NY and NX will have changed, so:\n self.updateDimensions()", "def _resize(self):\n avg_frames = 87 #this is the average image frame length in the entire dataset\n for i in range(len(self.data)):\n image = self.data[i]\n self.data[i] = resize(image, width=avg_frames, height=len(image))", "def rescale(self,scale_factor):\n self.scale_factor *= scale_factor\n\n if (self._orig_bitmap):\n self.bitmap = copy.copy(self._orig_bitmap)\n self.bitmap.image = self._orig_bitmap.zoom(self.scale_factor)", "def set_scale(self,scale_factor):\n self.scale_factor = scale_factor\n\n if (self._orig_bitmap):\n self.bitmap = copy.copy(self._orig_bitmap)\n self.bitmap.image = self._orig_bitmap.zoom(self.scale_factor)", "def scale_images(self,zoom_factor=None):\n pass", "def _prep_tiles(self):\r\n # todo: 
write this. expected output is a flat iterable.\r\n # todo: explore turning flatten() into generator\r\n\r\n if self._bounds and not self._tiles:\r\n # build tile list from bounds\r\n self._zoom = self._detail + Pin.find_span_zoom(self._bounds)\r\n self._tiles = Tile.from_pins(self._bounds, self._zoom) # get the tiles covering the span\r\n Tile.new_tile_q.join() # wait for tiles to arrive\r\n\r\n if self._tiles and not self._bounds:\r\n sw_pin = Pin.from_tile_coord(np.min(self._X), np.max(self._Y) + 1, self._zoom)\r\n ne_pin = Pin.from_tile_coord(np.max(self._X) + 1, np.min(self._Y), self._zoom)\r\n self._bounds = sw_pin, ne_pin\r\n\r\n assert all(isinstance(t, Tile) for t in self._tiles), f'{self._tiles}' # all objects must be tiles\r\n self._X, self._Y, zooms = np.asarray(list(self._tiles)).T # asarray won't work on sets. ugh.\r\n assert all(zooms == zooms[0]) # all zooms must be the same\r\n self._zoom = zooms[0]", "def download_tile_tms(tile, imagery, folder, zoom, supertile):\n\n image_format = get_image_format(imagery['url'])\n r = requests.get(url(tile.split('-'), imagery['url']))\n tile_img = op.join(folder, '{}{}'.format(tile, image_format))\n tile = tile.split('-')\n\n #super-tile special case\n if supertile:\n new_zoom = zoom + 1 #get zoom from ml-enabler database\n # get children\n child_tiles = children(int(tile[0]), int(tile[1]), int(tile[2]), zoom=new_zoom)\n child_tiles.sort()\n\n new_dim = 256 * (2 * (new_zoom - zoom))\n\n w_lst = []\n for i in range (2 * (new_zoom - zoom)):\n for j in range(2 * (new_zoom - zoom)):\n window = Window(i * 256, j * 256, 256, 256)\n w_lst.append(window)\n\n # request children\n with rasterio.open(tile_img, 'w', driver='jpeg', height=new_dim,\n width=new_dim, count=3, dtype=rasterio.uint8) as w:\n for num, t in enumerate(child_tiles):\n t = [str(t[0]), str(t[1]), str(t[2])]\n r = requests.get(url(t, imagery['url']))\n img = np.array(Image.open(io.BytesIO(r.content)), dtype=np.uint8)\n try:\n img = img.reshape((256, 256, 3)) # 4 channels returned from some endpoints, but not all\n except ValueError:\n img = img.reshape((256, 256, 4))\n img = img[:, :, :3]\n img = np.rollaxis(img, 2, 0)\n w.write(img, window=w_lst[num])\n else:\n r = requests.get(url(tile, imagery['url']))\n with open(tile_img, 'wb')as w:\n w.write(r.content)\n return tile_img", "def scale_dataset(self, dataset):\n dataset.inputs -= self.scalar_mean\n dataset.inputs /= self.scalar_std", "def resize_terrain(data, x1, x2, y1, y2):\n\n data_subset = data[x1:x2, y1:y2]\n return data_subset", "def test_downsampling(self):\n # Test single band\n r = gr.Raster(datasets.get_path(\"landsat_B4\"), downsample=4)\n assert r.data.shape == (1, 164, 200)\n assert r.height == 164\n assert r.width == 200\n\n # Test multiple band\n r = gr.Raster(datasets.get_path(\"landsat_RGB\"), downsample=2)\n assert r.data.shape == (3, 328, 400)\n\n # Test that xy2ij are consistent with new image\n # Upper left\n assert r.xy2ij(r.bounds.left, r.bounds.top) == (0, 0)\n # Upper right\n assert r.xy2ij(r.bounds.right+r.res[0], r.bounds.top) == (0, r.width+1)\n # Bottom right\n assert r.xy2ij(r.bounds.right+r.res[0], r.bounds.bottom) == (r.height, r.width+1)\n # One pixel right and down\n assert r.xy2ij(r.bounds.left + r.res[0], r.bounds.top - r.res[1]) == (1, 1)", "def _changeThumbnails(self):\n ds = Dataset(self.current_dataset)\n self.thumbnails = np.copy(ds.preview_img)\n self.preview_idx = 0", "def resize(self, layers):", "def scale_pixels(data):\n data /= 255", "def resize_data(self, data, ratio=2):\n l,h,w,c 
= data.shape\n shape_out = (h//ratio,w//ratio,c)\n data_out = np.empty((l,h//ratio,w//ratio,c))\n print(\"Resizing dataset...\")\n for i in range(len(data_out)):\n # for i in range(4):\n data_out[i] = sk.transform.resize(data[i],shape_out)\n print(\"Done!\")\n return data_out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
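A short sketch of driving scale_query_to_tile above in its 'average' branch: both inputs are in-memory GDAL datasets, the query buffer is a multiple of the tile size, and the caller materialises the finished tile with the PNG driver. The `tiler` instance (assumed to be configured with options.resampling == 'average'), the band count, and the output path are illustrative assumptions.

from osgeo import gdal

querysize, tilesize, tilebands = 1024, 256, 4
mem_drv = gdal.GetDriverByName('MEM')

dsquery = mem_drv.Create('', querysize, querysize, tilebands)  # filled via WriteRaster elsewhere
dstile = mem_drv.Create('', tilesize, tilesize, tilebands)

# With options.resampling == 'average', each band of dsquery is downsampled into
# dstile via gdal.RegenerateOverview(); the tile file is then written separately.
tiler.scale_query_to_tile(dsquery, dstile, tilefilename='7/42/53.png')
gdal.GetDriverByName('PNG').CreateCopy('7/42/53.png', dstile, strict=0)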
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
def generate_googlemaps(self): args = {} args['title'] = self.options.title args['googlemapskey'] = self.options.googlekey (args['south'], args['west'], args['north'], args['east']) = \ self.swne args['minzoom'] = self.tminz args['maxzoom'] = self.tmaxz args['tilesize'] = self.tilesize args['tileformat'] = self.tileext args['publishurl'] = self.options.url args['copyright'] = self.options.copyright s = \ """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml"> <head> <title>%(title)s</title> <meta http-equiv="content-type" content="text/html; charset=utf-8"/> <meta http-equiv='imagetoolbar' content='no'/> <style type="text/css"> v\:* {behavior:url(#default#VML);} html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; } body { margin: 10px; background: #fff; } h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; } #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; } #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;} #map { height: 95%%; border: 1px solid #888; } </style> <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s'></script> <script> //<![CDATA[ /* * Constants for given map * TODO: read it from tilemapresource.xml */ var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s)); var mapMinZoom = %(minzoom)s; var mapMaxZoom = %(maxzoom)s; var opacity = 0.75; var map; var hybridOverlay; /* * Create a Custom Opacity GControl * http://www.maptiler.org/google-maps-overlay-opacity-control/ */ var CTransparencyLENGTH = 58; // maximum width that the knob can move (slide width minus knob width) function CTransparencyControl( overlay ) { this.overlay = overlay; this.opacity = overlay.getTileLayer().getOpacity(); } CTransparencyControl.prototype = new GControl(); // This function positions the slider to match the specified opacity CTransparencyControl.prototype.setSlider = function(pos) { var left = Math.round((CTransparencyLENGTH*pos)); this.slide.left = left; this.knob.style.left = left+"px"; this.knob.style.top = "0px"; } // This function reads the slider and sets the overlay opacity level CTransparencyControl.prototype.setOpacity = function() { // set the global variable opacity = this.slide.left/CTransparencyLENGTH; this.map.clearOverlays(); this.map.addOverlay(this.overlay, { zPriority: 0 }); if (this.map.getCurrentMapType() == G_HYBRID_MAP) { this.map.addOverlay(hybridOverlay); } } // This gets called by the API when addControl(new CTransparencyControl()) CTransparencyControl.prototype.initialize = function(map) { var that=this; this.map = map; // Is this MSIE, if so we need to use AlphaImageLoader var agent = navigator.userAgent.toLowerCase(); if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false} // create the background graphic as a <div> containing an image var container = document.createElement("div"); container.style.width="70px"; container.style.height="21px"; // Handle transparent PNG files in MSIE if (this.ie) { var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');"; container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>'; } else { container.innerHTML = '<div 
style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>'; } // create the knob as a GDraggableObject // Handle transparent PNG files in MSIE if (this.ie) { var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');"; this.knob = document.createElement("div"); this.knob.style.height="21px"; this.knob.style.width="13px"; this.knob.style.overflow="hidden"; this.knob_img = document.createElement("div"); this.knob_img.style.height="21px"; this.knob_img.style.width="83px"; this.knob_img.style.filter=loader; this.knob_img.style.position="relative"; this.knob_img.style.left="-70px"; this.knob.appendChild(this.knob_img); } else { this.knob = document.createElement("div"); this.knob.style.height="21px"; this.knob.style.width="13px"; this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)"; this.knob.style.backgroundPosition="-70px 0px"; } container.appendChild(this.knob); this.slide=new GDraggableObject(this.knob, {container:container}); this.slide.setDraggableCursor('pointer'); this.slide.setDraggingCursor('pointer'); this.container = container; // attach the control to the map map.getContainer().appendChild(container); // init slider this.setSlider(this.opacity); // Listen for the slider being moved and set the opacity GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()}); //GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) }); return container; } // Set the default position for the control CTransparencyControl.prototype.getDefaultPosition = function() { return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47)); } /* * Full-screen Window Resize */ function getWindowHeight() { if (self.innerHeight) return self.innerHeight; if (document.documentElement && document.documentElement.clientHeight) return document.documentElement.clientHeight; if (document.body) return document.body.clientHeight; return 0; } function getWindowWidth() { if (self.innerWidth) return self.innerWidth; if (document.documentElement && document.documentElement.clientWidth) return document.documentElement.clientWidth; if (document.body) return document.body.clientWidth; return 0; } function resize() { var map = document.getElementById("map"); var header = document.getElementById("header"); var subheader = document.getElementById("subheader"); map.style.height = (getWindowHeight()-80) + "px"; map.style.width = (getWindowWidth()-20) + "px"; header.style.width = (getWindowWidth()-20) + "px"; subheader.style.width = (getWindowWidth()-20) + "px"; // map.checkResize(); } /* * Main load function: */ function load() { if (GBrowserIsCompatible()) { // Bug in the Google Maps: Copyright for Overlay is not correctly displayed var gcr = GMapType.prototype.getCopyrights; GMapType.prototype.getCopyrights = function(bounds,zoom) { return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom)); } map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } ); map.addMapType(G_PHYSICAL_MAP); map.setMapType(G_PHYSICAL_MAP); map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds )); hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] ); GEvent.addListener(map, "maptypechanged", function() { if (map.getCurrentMapType() == G_HYBRID_MAP) { map.addOverlay(hybridOverlay); } else { map.removeOverlay(hybridOverlay); } } ); var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, 
mapMaxZoom); var mercator = new GMercatorProjection(mapMaxZoom+1); tilelayer.getTileUrl = function(tile,zoom) { if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) { return "http://www.maptiler.org/img/none.png"; } var ymax = 1 << zoom; var y = ymax - tile.y -1; var tileBounds = new GLatLngBounds( mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ), mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom ) ); if (mapBounds.intersects(tileBounds)) { return zoom+"/"+tile.x+"/"+y+".png"; } else { return "http://www.maptiler.org/img/none.png"; } } // IE 7-: support for PNG alpha channel // Unfortunately, the opacity for whole overlay is then not changeable, either or... tilelayer.isPng = function() { return true;}; tilelayer.getOpacity = function() { return opacity; } overlay = new GTileLayerOverlay( tilelayer ); map.addOverlay(overlay); map.addControl(new GLargeMapControl()); map.addControl(new GHierarchicalMapTypeControl()); map.addControl(new CTransparencyControl( overlay )); """ \ % args if self.kml: s += \ """ map.addMapType(G_SATELLITE_3D_MAP); map.getEarthInstance(getEarthInstanceCB); """ s += \ """ map.enableContinuousZoom(); map.enableScrollWheelZoom(); map.setMapType(G_HYBRID_MAP); } resize(); } """ if self.kml: s += \ """ function getEarthInstanceCB(object) { var ge = object; if (ge) { var url = document.location.toString(); url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml'; var link = ge.createLink(""); if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") } else { link.setHref(url) }; var networkLink = ge.createNetworkLink(""); networkLink.setName("TMS Map Overlay"); networkLink.setFlyToView(true); networkLink.setLink(link); ge.getFeatures().appendChild(networkLink); } else { // alert("You should open a KML in Google Earth"); // add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML? } } """ \ % args s += \ """ onresize=function(){ resize(); }; //]]> </script> </head> <body onload="load()"> <div id="header"><h1>%(title)s</h1></div> <div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a> <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU --> </div> <div id="map"></div> </body> </html> """ \ % args return s
[ "def generate_openlayers(self):\n\n args = {}\n args['title'] = self.options.title\n args['bingkey'] = self.options.bingkey\n (args['south'], args['west'], args['north'], args['east']) = \\\n self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url\n args['copyright'] = self.options.copyright\n if self.options.tmscompatible:\n args['tmsoffset'] = '-1'\n else:\n args['tmsoffset'] = ''\n if self.options.profile == 'raster':\n args['rasterzoomlevels'] = self.tmaxz + 1\n args['rastermaxresolution'] = 2 ** self.nativezoom \\\n * self.out_gt[1]\n\n s = \\\n \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n .olImageLoadError { display: none; }\n .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }\n </style>\"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>\"\"\" \\\n % args\n\n s += \\\n \"\"\"\n <script src=\"http://www.openlayers.org/api/2.12/OpenLayers.js\"></script>\n <script>\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n var emptyTileURL = \"http://www.maptiler.org/img/none.png\";\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n\n function init(){\"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:900913\",\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n numZoomLevels: 20\n };\n map = new OpenLayers.Map(options);\n\n // Create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n {\n type: google.maps.MapTypeId.ROADMAP,\n sphericalMercator: true\n });\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {\n type: google.maps.MapTypeId.SATELLITE,\n sphericalMercator: true\n });\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {\n type: google.maps.MapTypeId.HYBRID,\n sphericalMercator: true\n });\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {\n type: google.maps.MapTypeId.TERRAIN,\n sphericalMercator: true\n });\n\n // Create Bing layers\n var broad = new OpenLayers.Layer.Bing({\n name: \"Bing Roads\",\n key: \"%(bingkey)s\",\n type: \"Road\",\n sphericalMercator: true\n });\n var baer = new OpenLayers.Layer.Bing({\n name: \"Bing Aerial\",\n key: \"%(bingkey)s\",\n type: \"Aerial\",\n sphericalMercator: true\n });\n var bhyb = new OpenLayers.Layer.Bing({\n name: \"Bing Hybrid\",\n key: \"%(bingkey)s\",\n type: \"AerialWithLabels\",\n sphericalMercator: true\n });\n\n // Create OSM layer\n var osm = new 
OpenLayers.Layer.OSM(\"OpenStreetMap\");\n\n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([gmap, gsat, ghyb, gter,\n broad, baer, bhyb,\n osm, tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));\n \"\"\" \\\n % args\n elif self.options.profile == 'geodetic':\n\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:4326\"\n };\n map = new OpenLayers.Map(options);\n\n var wms = new OpenLayers.Layer.WMS(\"VMap0\",\n \"http://tilecache.osgeo.org/wms-c/Basic.py?\",\n {\n layers: 'basic',\n format: 'image/png'\n }\n );\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([wms,tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds);\n \"\"\" \\\n % args\n elif self.options.profile == 'raster':\n\n s += \\\n \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map(options);\n\n var layer = new OpenLayers.Layer.TMS(\"TMS Layer\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n getURL: getURL\n });\n\n map.addLayer(layer);\n map.zoomToExtent(mapBounds);\n \"\"\" \\\n % args\n\n s += \\\n \"\"\"\n map.addControls([new OpenLayers.Control.PanZoomBar(),\n new OpenLayers.Control.Navigation(),\n new OpenLayers.Control.MousePosition(),\n new OpenLayers.Control.ArgParser(),\n new OpenLayers.Control.Attribution()]);\n }\n \"\"\" \\\n % args\n\n if self.options.profile == 'mercator':\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {\n z+=1;\n }\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n elif self.options.profile == 'geodetic':\n\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = 
this.getServerZoom()%(tmsoffset)s;\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n elif self.options.profile == 'raster':\n\n s += \\\n \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" \\\n % args\n\n s += \\\n \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"http://www.maptiler.org/\">MapTiler</a>/<a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" \\\n % args\n\n return s", "def google_map(request):\n # Google Map\n google_map = {'height': '600px',\n 'width': '100%',\n 'kml_service': reverse('gizmos:get_kml')}\n\n messages.warning(request, 'WARNING: The \"google_map\" gizmo has been deprecated and may lose support in future releases of Tethys Platform.')\n\n context = {'google_map': google_map}\n\n return render(request, 'tethys_gizmos/gizmo_showcase/google_map.html', context)", "def draw_heatmap(x,y,z):\n x,y = np.meshgrid(x,y)\n terrain_map = folium.Map(location=[x[0,0], y[0,0]], tiles='Stamen Terrain', zoom_start=12)\n HeatMap(zip(x.flatten(),y.flatten(),z.flatten()), radius=10).add_to(terrain_map) \n terrain_map.save('map.html')", "def define_map(window=False, toolbar=False):\n \n # @ToDo: Make these configurable\n #config = gis.get_config()\n if not deployment_settings.get_security_map() or shn_has_role(\"MapAdmin\"):\n catalogue_toolbar = True\n else:\n catalogue_toolbar = False\n search = True\n catalogue_overlays = True\n\n # Custom Feature Layers\n # @ToDo: Move these layer definitions into the DB, removing Feature Groups\n # Feature Classes to be removed from Locations, although we still want the symbology mappings\n # Incidents\n module = \"irs\"\n resource = \"ireport\"\n layername = Tstr(\"Incident Reports\")\n popup_label = Tstr(\"Incident\")\n # Default (but still better to define here as otherwise each feature needs to check it's feature_class)\n marker = \"marker_red\"\n incidents = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=False)\n \n # Shelters\n module = \"cr\"\n resource = \"shelter\"\n layername = Tstr(\"Shelters\")\n popup_label = Tstr(\"Shelter\")\n marker = \"shelter\"\n shelters = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Schools\n module = \"sitrep\"\n resource = \"school_report\"\n layername = Tstr(\"Schools\")\n popup_label = Tstr(\"School\")\n marker = \"school\"\n schools = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Requests\n module = \"rms\"\n resource = \"req\"\n layername = Tstr(\"Requests\")\n popup_label = Tstr(\"Request\")\n marker = \"marker_yellow\"\n requests = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Assessments\n module = \"sitrep\"\n resource = \"assessment\"\n layername = Tstr(\"Assessments\")\n popup_label = Tstr(\"Assessment\")\n marker = \"marker_green\"\n assessments = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Activities\n module = \"project\"\n resource = \"activity\"\n layername = Tstr(\"Activities\")\n popup_label = Tstr(\"Activity\")\n marker = \"activity\"\n activities = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n feature_queries = [\n incidents,\n shelters,\n schools,\n requests,\n assessments,\n activities\n ]\n \n map = gis.show_map(\n window=window,\n catalogue_toolbar=catalogue_toolbar,\n #wms_browser = {\"name\" : \"Sahana \", \"url\" : \"http://geo.eden.sahanafoundation.org/geoserver/ows?service=WMS&request=GetCapabilities\"},\n toolbar=toolbar,\n search=search,\n catalogue_overlays=catalogue_overlays,\n feature_queries=feature_queries\n )\n\n return map", "def map_layer(geonotebook, tiled_layer, color_map=None, remove_existing=True):\n print('>> Creating pyramid 
layer...')\n pyramid_layer = tiled_layer.pyramid()\n if color_map is None:\n print('>> Getting layer histogram...')\n color_map = gps.ColorMap.build(pyramid_layer.get_histogram(), 'magma')\n tms = gps.TMS.build(pyramid_layer, color_map)\n if remove_existing:\n remove_map_layers(geonotebook)\n print('>> Adding weighted overlay layer to map...')\n geonotebook.add_layer(TMSRasterData(tms), name=\"layer\")", "def generateImage(self, **kwargs):\n\n start_x = kwargs.get('start_x', None)\n start_y = kwargs.get('start_y', None)\n tile_width = kwargs.get('tile_width', 5)\n tile_height = kwargs.get('tile_height', 5)\n\n # Check that we have x and y tile coordinates\n if start_x == None or start_y == None :\n start_x, start_y = self.getXY()\n\n # Determine the size of the image\n width, height = 256 * tile_width, 256 * tile_height\n\n #Create a new image of the size require\n map_img = Image.new('RGB', (width,height))\n sat_img = Image.new('RGB', (width,height))\n\n for x in range(0, tile_width):\n for y in range(0, tile_height) :\n if True:\n if args.label:\n # Store the image with labels\n url = 'https://mt0.google.com/vt/lyrs=y&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt/lyrs=s&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n sat_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n\n if True:\n if args.label:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom) # work needs to be done\n if args.debug: print(url)\n\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n map_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n return map_img, sat_img", "def set_map_overlay(self, flag):\n self._map_overlay = flag", "def map_viewing_client():\n\n # Read configuration settings\n config = gis.get_config()\n if config.opt_gis_layout == 1:\n window = True\n else:\n window = False\n\n # @ToDo Make Configurable\n toolbar = True\n \n map = define_map(window=window, toolbar=toolbar)\n\n return dict(map=map)", "def display_map(request):\n\n sightings = Sighting.objects.all()[:100]\n context = {\n 'sightings' : sightings,\n }\n return render(request, 'map/map.html', context)", "def generate_base_tiles(self):\n\n print 'Generating Base Tiles:'\n\n if self.options.verbose:\n\n # mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n # px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n # print \"Pixel coordinates:\", px, py, (mx, my)\n\n print ''\n print 'Tiles generated from the max zoom level:'\n print '----------------------------------------'\n print ''\n\n # Set the bounds\n\n (tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz]\n\n # Just the center tile\n # tminx = tminx+ (tmaxx - tminx)/2\n # tminy = tminy+ (tmaxy - tminy)/2\n # tmaxx = tminx\n # tmaxy = tminy\n\n ds = self.out_ds\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print ('dataBandsCount: ', self.dataBandsCount)\n print ('tilebands: ', tilebands)\n\n # print tminx, tminy, tmaxx, tmaxy\n\n tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - 
tminy))\n\n # print tcount\n\n ti = 0\n\n tz = self.tmaxz\n yrange = range(tmaxy, tminy - 1, -1)\n if self.options.leaflet:\n yrange = range(tminy, tmaxy + 1)\n\n for ty in yrange:\n for tx in range(tminx, tmaxx + 1):\n\n if self.stopped:\n break\n ti += 1\n tilefilename = os.path.join(self.output, str(tz),\n str(tx), '%s.%s' % ((2**tz-1-ty), self.tileext))\n if self.options.verbose:\n print (ti, '/', tcount, tilefilename) # , \"( TileMapService: z / x / y )\"\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print 'Tile generation skiped because of --resume'\n else:\n self.progressbar(ti / float(tcount))\n continue\n\n # Create directories for the tile\n\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n\n # Tile bounds in EPSG:900913\n\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # print \"\\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif\" % ( b[0], b[1], b[2], b[3], \"tiles.vrt\", tz, tx, ty)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n (rb, wb) = self.geo_query(ds, b[0], b[3], b[2],\n b[1])\n nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print ('\\tNative Extent (querysize',\n nativesize, '): ', rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n\n (rb, wb) = self.geo_query(\n ds,\n b[0],\n b[3],\n b[2],\n b[1],\n querysize=querysize,\n )\n\n (rx, ry, rxsize, rysize) = rb\n (wx, wy, wxsize, wysize) = wb\n else:\n\n # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = tx * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n if self.options.leaflet:\n ry = ty * tsize\n else:\n ry = ysize - ty * tsize - rysize\n\n (wx, wy) = (0, 0)\n (wxsize, wysize) = (int(rxsize / float(tsize)\n * self.tilesize), int(rysize / float(tsize)\n * self.tilesize))\n if not self.options.leaflet:\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n if self.options.verbose:\n print ('\\tReadRaster Extent: ', (rx, ry, rxsize,\n rysize), (wx, wy, wxsize, wysize))\n\n # Query is in 'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n\n # Tile dataset in memory\n\n dstile = self.mem_drv.Create('', self.tilesize,\n self.tilesize, tilebands)\n data = ds.ReadRaster(\n rx,\n ry,\n rxsize,\n rysize,\n wxsize,\n wysize,\n band_list=list(range(1, self.dataBandsCount + 1)),\n )\n alpha = self.alphaband.ReadRaster(\n rx,\n ry,\n rxsize,\n rysize,\n wxsize,\n wysize,\n )\n\n if self.tilesize == querysize:\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n data,\n band_list=list(range(1, self.dataBandsCount\n + 1)),\n )\n dstile.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n alpha,\n band_list=[tilebands],\n 
)\n else:\n \n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n\n dsquery = self.mem_drv.Create('', querysize,\n querysize, tilebands)\n\n # TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported)\n # for i in range(1, tilebands+1):\n # dsquery.GetRasterBand(1).Fill(tilenodata)\n\n dsquery.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n data,\n band_list=list(range(1, self.dataBandsCount\n + 1)),\n )\n dsquery.WriteRaster(\n wx,\n wy,\n wxsize,\n wysize,\n alpha,\n band_list=[tilebands],\n )\n\n # print('-'+tilefilename+'-')\n self.scale_query_to_tile(dsquery, dstile,\n tilefilename)\n del dsquery\n\n del data\n\n if self.options.resampling != 'antialias':\n\n # Write a copy of tile to png/jpg\n\n self.out_drv.CreateCopy(tilefilename, dstile,\n strict=0)\n\n del dstile\n\n # Create a KML file for this tile.\n\n if self.kml:\n kmlfilename = os.path.join(self.output, str(tz),\n str(tx), '%d.kml' % ty)\n if not self.options.resume \\\n or not os.path.exists(kmlfilename):\n f = open(kmlfilename, 'w')\n f.write(self.generate_kml(tx, ty, tz))\n f.close()\n\n if not self.options.verbose:\n self.progressbar(ti / float(tcount))", "def client_map_detail_view_html(options=None):\n return dict()", "def _render_tiles(self, tiles, wslice, hslice):\n\n for row in tiles:\n for atile in row:\n basex = wslice*atile.x\n basey = hslice*atile.y\n if atile.visited is True:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=atile.bg)\n else:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=\"black\")", "def __defineMap(self):\n from bokeh.models import WMTSTileSource\n url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'\n wmts = WMTSTileSource(url=url)\n mapTiles = gv.WMTS(wmts)\n return mapTiles", "def client_map_view_html(options=None):\n return dict()", "def __stitch_maps(self, img):\n func_dict = {\"google\": self.__request_google_image, \"bing\": self.__request_bing_image}\n size_dict = {\"google\": (1280, 1280), \"bing\": (1280, 1280)}\n\n # get x and y\n x = size_dict.get(self.source)[0]\n y = size_dict.get(self.source)[1]\n\n # map in the middle\n mid_r = func_dict.get(self.source)(img, 0, 0)\n mid = byte_2_img(mid_r)\n mid = mid[0:y, 0:x]\n # map in the bottom\n bot_r = func_dict.get(self.source)(img, -0.00027, 0)\n bot = byte_2_img(bot_r)\n bot = bot[0:y, 0:x]\n # map in the top\n top_r = func_dict.get(self.source)(img, 0.00029, 0)\n top = byte_2_img(top_r)\n top = top[0:y, 0:x]\n # map in the left\n left_r = func_dict.get(self.source)(img, 0, -0.00062)\n left = byte_2_img(left_r)\n left = left[0:y, 0:x]\n # map in the right\n right_r = func_dict.get(self.source)(img, 0, 0.00060)\n right = byte_2_img(right_r)\n right = right[0:y, 0:x]\n # map in the top left\n top_left_r = func_dict.get(self.source)(img, 0.00029, -0.00062)\n top_left = byte_2_img(top_left_r)\n top_left = top_left[0:y, 0:x]\n # map in the bottom left\n bot_left_r = func_dict.get(self.source)(img, -0.00027, -0.00062)\n bot_left = byte_2_img(bot_left_r)\n bot_left = bot_left[0:y, 0:x]\n # map in the top right\n top_right_r = func_dict.get(self.source)(img, 0.00029, 0.00060)\n top_right = byte_2_img(top_right_r)\n top_right = top_right[0:y, 
0:x]\n # map in the bottom right\n bot_right_r = func_dict.get(self.source)(img, -0.00027, 0.00060)\n bot_right = byte_2_img(bot_right_r)\n bot_right = bot_right[0:y, 0:x]\n\n # find the stitch index for each image\n j1 = find_stitch_index(mid, top, 0)\n j2 = find_stitch_index(bot, mid, 0)\n j3 = find_stitch_index(mid, left, 1)\n j4 = find_stitch_index(right, mid, 1)\n\n # cut the map to correct size\n new_top = top[0:j1, 0:x]\n new_tleft = top_left[0:j1, 0:j3]\n new_tright = top_right[0:j1, x - j4:x]\n new_left = left[0:y, 0:j3]\n new_right = right[0:y, x - j4:x]\n new_bottom = bot[y - j2:y, 0:x]\n new_bleft = bot_left[y - j2:y, 0:j3]\n new_bright = bot_right[y - j2:y, x - j4:x]\n\n # concatenate maps\n img0 = np.concatenate([new_tleft, new_top, new_tright], 1)\n img1 = np.concatenate([new_left, mid, new_right], 1)\n img2 = np.concatenate([new_bleft, new_bottom, new_bright], 1)\n img3 = np.concatenate([img0, img1, img2], 0)\n\n return img3", "def embed_map(leafmap, path=\"map.html\"):\n leafmap.create_map(path=path)\n return HTML(('<iframe src=\"files/{path}\" '\n 'style=\"width: 100%; height: 510px; border: none\">'\n '</iframe>').format(path=path))", "def display_google_map():\n\n email = session.get(\"email\")\n items = PublicItem.query.all()\n places = []\n\n for item in items:\n item_coordinates = [item.title, item.latitude,\n item.longitude]\n places.append(item_coordinates)\n \n # change back to UTF-8\n for location in places:\n location[0] = str(location[0])\n\n return render_template(\"public-items-map.html\",\n gm_api_key=gm_api_key,\n places=places,\n email=email)", "def timeMap(request):\n\n map_key = MAP_KEY\n (minLon, maxLon, minLat, maxLat) = (mark_safe(min_lat),\n mark_safe(max_lat), mark_safe(min_lon), mark_safe(max_lon))\n return render_to_response('messages_time_map.html', locals(),\n context_instance=RequestContext(request))", "def overlap_map(self):\n self.method.plot_overlapping()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
def generate_openlayers(self): args = {} args['title'] = self.options.title args['bingkey'] = self.options.bingkey (args['south'], args['west'], args['north'], args['east']) = \ self.swne args['minzoom'] = self.tminz args['maxzoom'] = self.tmaxz args['tilesize'] = self.tilesize args['tileformat'] = self.tileext args['publishurl'] = self.options.url args['copyright'] = self.options.copyright if self.options.tmscompatible: args['tmsoffset'] = '-1' else: args['tmsoffset'] = '' if self.options.profile == 'raster': args['rasterzoomlevels'] = self.tmaxz + 1 args['rastermaxresolution'] = 2 ** self.nativezoom \ * self.out_gt[1] s = \ """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" <head> <title>%(title)s</title> <meta http-equiv='imagetoolbar' content='no'/> <style type="text/css"> v\:* {behavior:url(#default#VML);} html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; } body { margin: 10px; background: #fff; } h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; } #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; } #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;} #map { height: 95%%; border: 1px solid #888; } .olImageLoadError { display: none; } .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; } </style>""" \ % args if self.options.profile == 'mercator': s += \ """ <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>""" \ % args s += \ """ <script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script> <script> var map; var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s); var mapMinZoom = %(minzoom)s; var mapMaxZoom = %(maxzoom)s; var emptyTileURL = "http://www.maptiler.org/img/none.png"; OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3; function init(){""" \ % args if self.options.profile == 'mercator': s += \ """ var options = { div: "map", controls: [], projection: "EPSG:900913", displayProjection: new OpenLayers.Projection("EPSG:4326"), numZoomLevels: 20 }; map = new OpenLayers.Map(options); // Create Google Mercator layers var gmap = new OpenLayers.Layer.Google("Google Streets", { type: google.maps.MapTypeId.ROADMAP, sphericalMercator: true }); var gsat = new OpenLayers.Layer.Google("Google Satellite", { type: google.maps.MapTypeId.SATELLITE, sphericalMercator: true }); var ghyb = new OpenLayers.Layer.Google("Google Hybrid", { type: google.maps.MapTypeId.HYBRID, sphericalMercator: true }); var gter = new OpenLayers.Layer.Google("Google Terrain", { type: google.maps.MapTypeId.TERRAIN, sphericalMercator: true }); // Create Bing layers var broad = new OpenLayers.Layer.Bing({ name: "Bing Roads", key: "%(bingkey)s", type: "Road", sphericalMercator: true }); var baer = new OpenLayers.Layer.Bing({ name: "Bing Aerial", key: "%(bingkey)s", type: "Aerial", sphericalMercator: true }); var bhyb = new OpenLayers.Layer.Bing({ name: "Bing Hybrid", key: "%(bingkey)s", type: "AerialWithLabels", sphericalMercator: true }); // Create OSM layer var osm = new OpenLayers.Layer.OSM("OpenStreetMap"); // create TMS Overlay layer var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', isBaseLayer: false, getURL: getURL }); if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); } map.addLayers([gmap, 
gsat, ghyb, gter, broad, baer, bhyb, osm, tmsoverlay]); var switcherControl = new OpenLayers.Control.LayerSwitcher(); map.addControl(switcherControl); switcherControl.maximizeControl(); map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection)); """ \ % args elif self.options.profile == 'geodetic': s += \ """ var options = { div: "map", controls: [], projection: "EPSG:4326" }; map = new OpenLayers.Map(options); var wms = new OpenLayers.Layer.WMS("VMap0", "http://tilecache.osgeo.org/wms-c/Basic.py?", { layers: 'basic', format: 'image/png' } ); var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', isBaseLayer: false, getURL: getURL }); if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); } map.addLayers([wms,tmsoverlay]); var switcherControl = new OpenLayers.Control.LayerSwitcher(); map.addControl(switcherControl); switcherControl.maximizeControl(); map.zoomToExtent(mapBounds); """ \ % args elif self.options.profile == 'raster': s += \ """ var options = { div: "map", controls: [], maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s), maxResolution: %(rastermaxresolution)f, numZoomLevels: %(rasterzoomlevels)d }; map = new OpenLayers.Map(options); var layer = new OpenLayers.Layer.TMS("TMS Layer", "", { serviceVersion: '.', layername: '.', alpha: true, type: '%(tileformat)s', getURL: getURL }); map.addLayer(layer); map.zoomToExtent(mapBounds); """ \ % args s += \ """ map.addControls([new OpenLayers.Control.PanZoomBar(), new OpenLayers.Control.Navigation(), new OpenLayers.Control.MousePosition(), new OpenLayers.Control.ArgParser(), new OpenLayers.Control.Attribution()]); } """ \ % args if self.options.profile == 'mercator': s += \ """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom(); if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') { z+=1; } var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ \ % args elif self.options.profile == 'geodetic': s += \ """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom()%(tmsoffset)s; var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." 
+ this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ \ % args elif self.options.profile == 'raster': s += \ """ function getURL(bounds) { bounds = this.adjustBounds(bounds); var res = this.getServerResolution(); var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w)); var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h)); var z = this.getServerZoom(); var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type; var url = this.url; if (OpenLayers.Util.isArray(url)) { url = this.selectUrl(path, url); } if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) { return url + path; } else { return emptyTileURL; } } """ \ % args s += \ """ function getWindowHeight() { if (self.innerHeight) return self.innerHeight; if (document.documentElement && document.documentElement.clientHeight) return document.documentElement.clientHeight; if (document.body) return document.body.clientHeight; return 0; } function getWindowWidth() { if (self.innerWidth) return self.innerWidth; if (document.documentElement && document.documentElement.clientWidth) return document.documentElement.clientWidth; if (document.body) return document.body.clientWidth; return 0; } function resize() { var map = document.getElementById("map"); var header = document.getElementById("header"); var subheader = document.getElementById("subheader"); map.style.height = (getWindowHeight()-80) + "px"; map.style.width = (getWindowWidth()-20) + "px"; header.style.width = (getWindowWidth()-20) + "px"; subheader.style.width = (getWindowWidth()-20) + "px"; if (map.updateSize) { map.updateSize(); }; } onresize=function(){ resize(); }; </script> </head> <body onload="init()"> <div id="header"><h1>%(title)s</h1></div> <div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a> <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU --> </div> <div id="map"></div> <script type="text/javascript" >resize()</script> </body> </html>""" \ % args return s
[ "def map_service_catalogue():\n if deployment_settings.get_security_map() and not shn_has_role(\"MapAdmin\"):\n unauthorised()\n\n subtitle = T(\"List Layers\")\n # Start building the Return with the common items\n output = dict(subtitle=subtitle)\n\n # Hack: We control all perms from this 1 table\n table = db.gis_layer_openstreetmap\n authorised = shn_has_permission(\"update\", table)\n item_list = []\n even = True\n if authorised:\n # List View with checkboxes to Enable/Disable layers\n for type in gis_layer_types:\n table = db[\"gis_layer_%s\" % type]\n query = table.id > 0\n sqlrows = db(query).select()\n for row in sqlrows:\n if even:\n theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.description:\n description = row.description\n else:\n description = \"\"\n label = type + \"_\" + str(row.id)\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\", value=True, _name=label)\n else:\n enabled = INPUT(_type=\"checkbox\", _name=label)\n item_list.append(TR(TD(row.name), TD(description), TD(enabled), _class=theclass))\n\n table_header = THEAD(TR(TH(\"Layer\"), TH(\"Description\"), TH(\"Enabled?\")))\n table_footer = TFOOT(TR(TD(INPUT(_id=\"submit_button\", _type=\"submit\", _value=T(\"Update\")), _colspan=3)), _align=\"right\")\n items = DIV(FORM(TABLE(table_header, TBODY(item_list), table_footer, _id=\"table-container\"), _name=\"custom\", _method=\"post\", _enctype=\"multipart/form-data\", _action=URL(r=request, f=\"layers_enable\")))\n\n else:\n # Simple List View\n for type in gis_layer_types:\n table = db[\"gis_layer_%s\" % type]\n query = table.id > 0\n sqlrows = db(query).select()\n for row in sqlrows:\n if even:\n theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.description:\n description = row.description\n else:\n description = \"\"\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\", value=\"on\", _disabled=\"disabled\")\n else:\n enabled = INPUT(_type=\"checkbox\", _disabled=\"disabled\")\n item_list.append(TR(TD(row.name), TD(description), TD(enabled), _class=theclass))\n\n table_header = THEAD(TR(TH(\"Layer\"), TH(\"Description\"), TH(\"Enabled?\")))\n items = DIV(TABLE(table_header, TBODY(item_list), _id=\"table-container\"))\n\n output.update(dict(items=items))\n return output", "def map_layer(geonotebook, tiled_layer, color_map=None, remove_existing=True):\n print('>> Creating pyramid layer...')\n pyramid_layer = tiled_layer.pyramid()\n if color_map is None:\n print('>> Getting layer histogram...')\n color_map = gps.ColorMap.build(pyramid_layer.get_histogram(), 'magma')\n tms = gps.TMS.build(pyramid_layer, color_map)\n if remove_existing:\n remove_map_layers(geonotebook)\n print('>> Adding weighted overlay layer to map...')\n geonotebook.add_layer(TMSRasterData(tms), name=\"layer\")", "def _skymap(self, **kwargs):\n from pesummary.gw.plots.plot import _ligo_skymap_plot\n\n if \"luminosity_distance\" in self.keys():\n dist = self[\"luminosity_distance\"]\n else:\n dist = None\n\n return _ligo_skymap_plot(self[\"ra\"], self[\"dec\"], dist=dist, **kwargs)", "def _data_layer(self, cov, name, extent=None, group=None, offsite=None,\n wrapped=False, mask=None, indices=None, ropt=None):\n layer = ms.layerObj()\n layer.name = name\n layer.type = ms.MS_LAYER_RASTER\n\n if extent:\n layer.setMetaData(\"wms_extent\", \"%.12e %.12e %.12e %.12e\"%extent)\n layer.setExtent(*extent)\n\n #layer.setMetaData(\n # \"wms_enable_request\", \"getcapabilities getmap getfeatureinfo\"\n #)\n\n if wrapped:\n # set the 
info for the connector to wrap this layer around the dateline\n layer.setMetaData(\"eoxs_wrap_dateline\", \"true\")\n\n # set projection\n sr = cov.spatial_reference\n layer.setProjection(sr.proj)\n layer.setMetaData(\"ows_srs\", \"EPSG:%d\"%sr.srid)\n layer.setMetaData(\"wms_srs\", \"EPSG:%d\"%sr.srid)\n\n if indices:\n #layer.addProcessing(\"CLOSE_CONNECTION=CLOSE\") #What it this good for?\n layer.setProcessingKey(\"BANDS\", \",\".join(\"%d\"%v for v in indices))\n\n if group:\n layer.setMetaData(\"wms_layer_group\", group)\n\n if offsite:\n layer.offsite = offsite\n else:\n layer.offsite = self._offsite_color(cov.range_type, indices)\n\n if ropt:\n if ropt.scale_min is not None and ropt.scale_max is not None:\n scale = \"%d,%d\"%(ropt.scale_min, ropt.scale_max)\n layer.setProcessingKey(\"SCALE\", scale)\n elif ropt.scale_auto:\n layer.setProcessingKey(\"SCALE\", \"AUTO\")\n\n if mask:\n layer.mask = mask\n\n return layer", "def overlayImage(movie_name, locs_name, frame_number, sx = 8, sy = 8):\n frame = datareader.inferReader(movie_name).loadAFrame(frame_number).astype(numpy.float64)\n with saH5Py.SAH5Py(locs_name) as h5:\n locs = h5.getLocalizationsInFrame(frame_number)\n\n frame = frame - numpy.min(frame)\n frame = frame/numpy.max(frame)\n \n fig = pyplot.figure(figsize = (sx, sy))\n ax = fig.add_subplot(1,1,1)\n ax.imshow(frame, interpolation = 'nearest', cmap = \"gray\")\n for i in range(locs[\"x\"].size):\n width = 10\n height = 10\n if \"xsigma\" in locs:\n width = height = 5.0*locs[\"xsigma\"][i]\n if \"ysigma\" in locs:\n height = 5.0*locs[\"ysigma\"][i]\n ellipse = patches.Ellipse((locs[\"x\"][i], locs[\"y\"][i]), width, height, facecolor='none', edgecolor='g', linewidth = 2)\n ax.add_artist(ellipse)\n \n #ax.scatter(locs[\"x\"], locs[\"y\"], s = 200,\n ax.set_title(\"Overlay Image\")\n\n pyplot.show()", "def define_map(window=False, toolbar=False):\n \n # @ToDo: Make these configurable\n #config = gis.get_config()\n if not deployment_settings.get_security_map() or shn_has_role(\"MapAdmin\"):\n catalogue_toolbar = True\n else:\n catalogue_toolbar = False\n search = True\n catalogue_overlays = True\n\n # Custom Feature Layers\n # @ToDo: Move these layer definitions into the DB, removing Feature Groups\n # Feature Classes to be removed from Locations, although we still want the symbology mappings\n # Incidents\n module = \"irs\"\n resource = \"ireport\"\n layername = Tstr(\"Incident Reports\")\n popup_label = Tstr(\"Incident\")\n # Default (but still better to define here as otherwise each feature needs to check it's feature_class)\n marker = \"marker_red\"\n incidents = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=False)\n \n # Shelters\n module = \"cr\"\n resource = \"shelter\"\n layername = Tstr(\"Shelters\")\n popup_label = Tstr(\"Shelter\")\n marker = \"shelter\"\n shelters = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Schools\n module = \"sitrep\"\n resource = \"school_report\"\n layername = Tstr(\"Schools\")\n popup_label = Tstr(\"School\")\n marker = \"school\"\n schools = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Requests\n module = \"rms\"\n resource = \"req\"\n layername = Tstr(\"Requests\")\n popup_label = Tstr(\"Request\")\n marker = \"marker_yellow\"\n requests = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Assessments\n module = \"sitrep\"\n resource = \"assessment\"\n layername = 
Tstr(\"Assessments\")\n popup_label = Tstr(\"Assessment\")\n marker = \"marker_green\"\n assessments = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n # Activities\n module = \"project\"\n resource = \"activity\"\n layername = Tstr(\"Activities\")\n popup_label = Tstr(\"Activity\")\n marker = \"activity\"\n activities = gis.get_feature_layer(module, resource, layername, popup_label, marker, active=True)\n \n feature_queries = [\n incidents,\n shelters,\n schools,\n requests,\n assessments,\n activities\n ]\n \n map = gis.show_map(\n window=window,\n catalogue_toolbar=catalogue_toolbar,\n #wms_browser = {\"name\" : \"Sahana \", \"url\" : \"http://geo.eden.sahanafoundation.org/geoserver/ows?service=WMS&request=GetCapabilities\"},\n toolbar=toolbar,\n search=search,\n catalogue_overlays=catalogue_overlays,\n feature_queries=feature_queries\n )\n\n return map", "def write_map_layers(cur, sb, lang, filename):\n basemaps = []\n layergroups = []\n\n sb.writelines([\"Arches.createNamespace('Arches.i18n.MapLayers');\\n\"])\n\n # first get the list of all layer ids\n cur.execute(\"SELECT id FROM app_metadata.maplayers ORDER BY layergroup_i18n_key\")\n layerids = cur.fetchall()\n\n for layerid in layerids:\n # try and get the localized layer info\n cur.execute(\"\"\"SELECT id, active, on_map as \\\"onMap\\\", selectable, \n basemap, app_metadata.get_i18n_value(name_i18n_key, %s, '') as name, icon, symbology, thumbnail, \n app_metadata.get_i18n_value(description_i18n_key, %s, '') as description, \n app_metadata.get_i18n_value(layergroup_i18n_key, %s, '') as layergroup, layer, sortorder \n FROM app_metadata.maplayers \n WHERE maplayers.id = %s \"\"\", [lang, lang, lang, layerid])\n\n rows = dictfetchall(cur)\n\n # if the layer name hasn't been localized fall back to the default language\n if len(rows) == 0:\n cur.execute(\"SELECT id, active, on_map as \\\"onMap\\\", selectable, \"\n \"basemap, app_metadata.get_i18n_value(name_i18n_key, %s, '') as name, icon, symbology, thumbnail, \"\n \"app_metadata.get_i18n_value(description_i18n_key, %s, '') as description, \"\n \"app_metadata.get_i18n_value(layergroup_i18n_key, %s, '') as layergroup, layer, sortorder \"\n \"FROM app_metadata.maplayers \"\n \"WHERE maplayers.id = %s \", [lang, lang, lang, layerid])\n\n rows = dictfetchall(cur) \n\n # write out basemaps first\n for row in rows:\n if row['basemap'] == True:\n basemaps.append(row)\n\n # write out all other layers next\n for row in rows:\n if row['basemap'] == False:\n if has_name(layergroups, row['layergroup']):\n get_layergroup_by_name(layergroups, row['layergroup']).layers.append(row)\n else:\n layergroups.append(LayerGroup(row['layergroup']))\n get_layergroup_by_name(layergroups, row['layergroup']).layers.append(row)\n\n\n ret = \"Arches.i18n.MapLayers.basemaps=\" + JSONSerializer().serialize(basemaps, ensure_ascii=False) + \";\\n\"\n sb.writelines(ret)\n utils.WriteToFile(filename, sb.getvalue(), 'a')\n sb.truncate(0) \n\n ret = \"Arches.i18n.MapLayers.layerGroups=\" + JSONSerializer().serialize(layergroups, ensure_ascii=False) + \";\\n\"\n sb.writelines(ret)\n utils.WriteToFile(filename, sb.getvalue(), 'a')\n sb.truncate(0)", "def SOC_map(occurances):\n token = open(\"token.txt\").read()\n fig2 = px.scatter_mapbox(occurances, lat=\"GPS_LAT\", lon=\"GPS_LONG\",\n color_discrete_sequence=[\"green\"], zoom=2, height=600, size_max=10,\n color_continuous_scale=px.colors.diverging.RdYlGn, color='OC',\n range_color=[df2['OC'].min(), df2['OC'].max()], opacity=0.75, 
hover_name=\"Point_ID\",\n animation_frame='Year')\n fig2.update_layout(mapbox_style=\"satellite-streets\", mapbox_accesstoken=token)\n fig2.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig2.update_layout({\n 'plot_bgcolor': 'rgba(0, 0, 0, 0)',\n 'paper_bgcolor': 'rgba(0, 0, 0, 0)',\n 'font': {'color': 'black'}\n })\n fig2.update_layout(clickmode='event')\n return fig2", "def __defineMap(self):\n from bokeh.models import WMTSTileSource\n url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'\n wmts = WMTSTileSource(url=url)\n mapTiles = gv.WMTS(wmts)\n return mapTiles", "def __init__(\n self,\n grid,\n z0s,\n ids,\n attrs,\n x0=0,\n y0=0,\n function=lambda x, y: 0 * x + 0 * y,\n layer_type=\"EventLayers\",\n dz_advection=0,\n rock_id=None,\n ):\n\n function_args = function.__code__.co_varnames\n if len(function_args) != 2:\n raise ValueError(\n \"LithoLayers: function must take exactly two arguments, x and y.\"\n )\n\n if np.asarray(z0s).size != np.asarray(ids).size:\n raise ValueError(\n \"LithoLayers: Size of layer depths and layer IDs must be the same\"\n )\n\n if np.any(np.diff(z0s) < 0):\n raise ValueError(\"LithoLayers: Bad layer depth order passed.\")\n\n z_surf = function(grid.x_of_node - x0, grid.y_of_node - y0)\n\n if hasattr(z_surf, \"shape\"):\n if z_surf.shape != grid.x_of_node.shape:\n raise ValueError(\n \"LithoLayers: function must return an array of shape (n_nodes,)\"\n )\n else:\n raise ValueError(\n \"LithoLayers: function must return an array of shape (n_nodes,)\"\n )\n\n layer_thicknesses = []\n layer_ids = []\n\n num_layers = np.asarray(z0s).size\n\n last_layer_elev = np.zeros(grid.number_of_nodes)\n\n # create layers (here listed from the top to the bottom.)\n for i in range(num_layers):\n layer_depth = z_surf + z0s[i]\n layer_depth[layer_depth < 0] = 0\n\n layer_thickness = layer_depth.copy() - last_layer_elev.copy()\n\n last_layer_elev = layer_depth.copy()\n\n layer_thicknesses.append(layer_thickness)\n layer_ids.append(ids[i] * np.ones(z_surf.size))\n\n super().__init__(\n grid,\n layer_thicknesses,\n layer_ids,\n attrs,\n layer_type=layer_type,\n dz_advection=dz_advection,\n rock_id=rock_id,\n )", "def swap_overlays(request):\n\n overlay_json = {\"type\": \"GeometryCollection\",\n \"geometries\": [{\"type\": \"Polygon\",\n \"coordinates\": [[40.643135583312805, -111.48951530456543],\n [40.636622594719725, -111.49432182312012],\n [40.63310531666155, -111.4877986907959],\n [40.63805550673186, -111.48110389709473],\n [40.6413120105605, -111.48539543151855]],\n \"properties\": {\"id\": 4, \"value\": 5}, \"crs\": {\"type\": \"link\", \"properties\": {\n \"href\": \"http://spatialreference.org/ref/epsg/4326/proj4/\", \"type\": \"proj4\"}}\n },\n {\"type\": \"Point\",\n \"coordinates\": [40.629587853312174, -111.50959968566895],\n \"properties\": {\"id\": 5, \"value\": 6}, \"crs\": {\"type\": \"link\", \"properties\": {\n \"href\": \"http://spatialreference.org/ref/epsg/4326/proj4/\", \"type\": \"proj4\"}}\n },\n {\"type\": \"LineString\",\n \"coordinates\": [[40.62737305910759, -111.50118827819824],\n [40.61564645424611, -111.5071964263916],\n [40.61277963772034, -111.48608207702637],\n [40.62802447679272, -111.49157524108887]],\n \"properties\": {\"id\": 6, \"value\": 7}, \"crs\": {\"type\": \"link\", \"properties\": {\n \"href\": \"http://spatialreference.org/ref/epsg/4326/proj4/\", \"type\": \"proj4\"}}\n }\n ]\n }\n\n return HttpResponse(json.dumps(overlay_json), 
content_type='application/json')", "def plot_on_sphere():\n\n import matplotlib.pyplot as plt\n\n # Nondimensionalized radius of the earth\n Rsphere = 1.0\n\n def contourLineSphere(fileName='fort.q0000',path='./_output'):\n \"\"\"\n This function plots the contour lines on a spherical surface for the shallow\n water equations solved on a sphere.\n \"\"\" \n \n # Open file\n # =========\n \n # Concatenate path and file name\n pathFileName = path + \"/\" + fileName\n\n f = open(pathFileName,\"r\")\n\n # Read file header\n # ================\n # The information contained in the first two lines are not used.\n unused = f.readline() # patch_number\n unused = f.readline() # AMR_level\n\n # Read mx, my, xlow, ylow, dx and dy\n line = f.readline()\n sline = line.split()\n mx = int(sline[0])\n\n line = f.readline()\n sline = line.split()\n my = int(sline[0])\n\n line = f.readline()\n sline = line.split()\n xlower = float(sline[0])\n\n line = f.readline()\n sline = line.split()\n ylower = float(sline[0])\n\n line = f.readline()\n sline = line.split()\n dx = float(sline[0])\n\n line = f.readline()\n sline = line.split()\n dy = float(sline[0])\n\n\n # Patch:\n # ====\n xupper = xlower + mx * dx\n yupper = ylower + my * dy\n\n x = pyclaw.Dimension(xlower,xupper,mx,name='x')\n y = pyclaw.Dimension(ylower,yupper,my,name='y')\n patch = pyclaw.Patch([x,y])\n\n\n # Override default mapc2p function\n # ================================\n patch.mapc2p = mapc2p_sphere_vectorized\n\n\n # Compute the physical coordinates of each cell's centers\n # ======================================================\n patch.compute_p_centers(recompute=True)\n xp = patch._p_centers[0]\n yp = patch._p_centers[1]\n zp = patch._p_centers[2]\n\n patch.compute_c_centers(recompute=True)\n xc = patch._c_centers[0]\n yc = patch._c_centers[1]\n \n # Define arrays of conserved variables\n h = np.zeros((mx,my))\n hu = np.zeros((mx,my))\n hv = np.zeros((mx,my))\n hw = np.zeros((mx,my))\n\n # Read solution\n for j in range(my):\n tmp = np.fromfile(f,dtype='float',sep=\" \",count=4*mx)\n tmp = tmp.reshape((mx,4))\n h[:,j] = tmp[:,0]\n hu[:,j] = tmp[:,1]\n hv[:,j] = tmp[:,2]\n hw[:,j] = tmp[:,3]\n\n \n # Plot solution in the computational domain\n # =========================================\n\n # Fluid height\n plt.figure()\n CS = plt.contour(xc,yc,h)\n plt.title('Fluid height (computational domain)')\n plt.xlabel('xc')\n plt.ylabel('yc')\n plt.clabel(CS, inline=1, fontsize=10)\n plt.show()", "def combine_layers(particle_layers, layer_infos):\n electrode_and_layer_spacing = 0.0001\n\n # keeping track of height, volume, and minimum and maximum x and y lengths of prism\n total_height = electrode_and_layer_spacing\n total_volume = 0\n\n max_x = 0\n max_y = 0\n \n # keeping track of the height of each layer\n layer_heights = []\n # TODO: do this better\n x_position_prism = 100000000000\n y_position_prism = 100000000000\n # looping through the layer(s)\n for info in layer_infos:\n total_height += ((info[4]*2) + electrode_and_layer_spacing)\n layer_heights += [info[4]*2]\n total_volume += info[5]\n if info[0] > max_x:\n max_x = info[0]\n if info[1] > max_y:\n max_y = info[1]\n # if x and/or y position are the lowest yet, update the prism x and/or y postion \n if info[2] < x_position_prism:\n x_position_prism = info[2]\n if info[3] < y_position_prism:\n y_position_prism = info[3]\n\n x_length_prism = max_x - x_position_prism\n y_length_prism = max_y - y_position_prism\n\n # calculate prism volume fraction\n volume_fraction = 
total_volume/(x_length_prism*y_length_prism*total_height)\n\n composite_info = [total_volume, x_length_prism, y_length_prism, total_height, x_position_prism, y_position_prism, volume_fraction]\n\n composite_particles = {}\n particleID = 1\n layer_counter = 0\n height_adjustment = electrode_and_layer_spacing\n # loop through the layers\n for layer in particle_layers:\n # loop through the particles\n for particle in layer:\n # get particle data\n particle_data = layer[particle]\n # if the particle has an x and y position, a, b, and c radii, and an angle\n if len(particle_data) == 6:\n\n # calculating a randomized current height\n leftover_space = layer_heights[layer_counter] - (particle_data[5][1]*2)\n if leftover_space > 0.001:\n rand_offset = np.random.uniform(0, leftover_space)\n current_height = height_adjustment + particle_data[5][1] + rand_offset\n else:\n current_height = height_adjustment + (layer_heights[layer_counter]/2)\n\n composite_particles[particleID] = [particle_data[0], particle_data[1], particle_data[2], particle_data[3], particle_data[4], particle_data[5]]\n composite_particles[particleID] += [(\"z\", current_height)]\n \n particleID += 1\n\n # add the height of the layer we just looped through and a space between layers to the height adjustment\n height_adjustment += (layer_heights[layer_counter] + electrode_and_layer_spacing)\n # increment layer counter\n layer_counter += 1\n\n return composite_particles, composite_info", "def zoning_etl():\r\n with arcetl.ArcETL(\"Zoning\") as etl:\r\n etl.init_schema(dataset.ZONING.path(\"pub\"))\r\n for _path in dataset.ZONING.path(\"inserts\"):\r\n etl.transform(arcetl.features.insert_from_path, insert_dataset_path=_path)\r\n # Need a singular ID for overlay use.\r\n etl.transform(arcetl.attributes.update_by_unique_id, field_name=\"zoning_id\")\r\n # Still need overlay 1-4 format for taxlot overlays.\r\n for i in [1, 2, 3, 4]:\r\n etl.transform(\r\n arcetl.attributes.update_by_function,\r\n field_name=\"overlay\" + str(i),\r\n ##TODO: Lambda --> helper function.\r\n function=(\r\n lambda overlays, position=i: overlays.split(\",\")[position - 1]\r\n if (overlays and len(overlays.split(\",\")) >= position)\r\n else None\r\n ),\r\n field_as_first_arg=False,\r\n arg_field_names=[\"alloverlays\"],\r\n )\r\n etl.load(dataset.ZONING.path(\"pub\"))", "def drawOverlays(self):\r\n\t\tpass", "def display_coronal_with_overlay(temporal_slice, coronal_slice, images, masks, label, window_min, window_max):\n img = images[temporal_slice][:,coronal_slice,:]\n msk = masks[temporal_slice][:,coronal_slice,:]==label\n\n overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) \n # Flip the image so that corresponds to correct radiological view.\n plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img)))\n plt.axis('off')\n plt.show()", "def makeSphericalHarmonicsScene(index):\n nu = 2**(4+index) + 1\n nv = nu\n xyzs, colors = sphericalHarmonics(nu, nv, CircularColorMap())\n surf = makeSurface(nu, nv, xyzs, colors)\n \n root = iv.Separator()\n root += iv.DirectionalLight()\n root += iv.OrthographicCamera()\n root += iv.TrackballManip()\n root += surf\n return root", "def vis_2d_field(odf, sphere):\n\n r = window.Renderer()\n sfu = actor.odf_slicer(odf.reshape(1, *odf.shape), sphere=sphere, colormap='plasma', scale=0.5)\n sfu.display(x=0)\n r.add(sfu)\n window.show(r)", "def createMapImage(self):\n # create background, landscape and object layer\n self.background_layer = Layers.BackgroundLayer()\n self.landscape_layer = 
Layers.LandscapeLayer()\n self.object_layer = Layers.ObjectLayer()\n\n # create first tank\n tank_one = Objects.Tank(0, 200, (self.height // 2),\n 100, 100, core.Qt.yellow, core.Qt.cyan)\n self.tanks.append(tank_one)\n self.getNewCoordinates(self.tanks[0].tank_id, 0)\n\n # create second tank\n tank_two = Objects.Tank(\n 1, (self.width - 200), (self.height // 2),\n 100, 100, core.Qt.black, core.Qt.red\n )\n self.tanks.append(tank_two)\n self.getNewCoordinates(self.tanks[1].tank_id, 0)\n # paint tanks\n tmp_painter = gui.QPainter(self.object_layer)\n tmp_painter.drawImage(self.tanks[0].x_position,\n self.tanks[0].y_position, self.tanks[0])\n tmp_painter.drawImage(self.tanks[1].x_position,\n self.tanks[1].y_position, self.tanks[1])\n tmp_painter.end()\n\n # draw all layers\n self.drawMap()\n self.drawObjects()\n self.drawGame()\n self.setPixmap(gui.QPixmap.fromImage(self.game_image))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up an import project for a study and import it.
def setup_import_project( root_path: pathlib.Path, study: StudyInputLayout, gpf_instance: GPFInstance, project_config_update: Optional[dict[str, Any]] = None, project_config_overwrite: Optional[dict[str, Any]] = None ) -> ImportProject: project_config = setup_import_project_config( root_path, study, gpf_instance, project_config_update=project_config_update, project_config_overwrite=project_config_overwrite) # pylint: disable=import-outside-toplevel project = ImportProject.build_from_file( project_config, gpf_instance=gpf_instance) return project
[ "def import_project(self,project_dir):\n # Check that target directory exists\n project_dir = os.path.abspath(project_dir)\n # Check that project doesn't already exist\n project_name = os.path.basename(project_dir)\n project_metadata = self.load_project_metadata()\n if project_name in [p['Project'] for p in project_metadata] or \\\n utils.AnalysisProject(project_name,\n os.path.join(self.analysis_dir,\n project_name)).exists:\n raise Exception(\"Project called '%s' already exists\" %\n project_name)\n # Load target as a project\n project = utils.AnalysisProject(project_name,project_dir)\n # Rsync the project directory\n print \"Importing project directory contents for '%s'\" % project_name\n try:\n excludes = ['--exclude=tmp.*',\n '--exclude=qc_report.*']\n rsync = applications.general.rsync(project_dir,\n self.analysis_dir,\n extra_options=excludes)\n print \"Running %s\" % rsync\n status = rsync.run_subprocess(log=self.log_path('import_project.rsync.log'))\n except Exception as ex:\n logging.error(\"Exception importing project: %s\" % ex)\n raise ex\n if status != 0:\n raise Exception(\"Failed to import project from %s (status %s)\" %\n (project_dir,status))\n # Update the projects.info metadata file\n print \"Updating projects.info file with imported project\"\n project_metadata = self.load_project_metadata()\n sample_names = [s.name for s in project.samples]\n project_metadata.add_project(project_name,\n sample_names,\n user=project.info.user,\n library_type=project.info.library_type,\n single_cell_platform=project.info.single_cell_platform,\n organism=project.info.organism,\n PI=project.info.PI,\n comments=project.info.comments)\n project_metadata.save()\n # Report\n print \"Projects now in metadata file:\"\n for p in project_metadata:\n print \"- %s\" % p['Project']\n # Update the QC report\n try:\n project = self.get_analysis_projects(pattern=project_name)[0]\n except Exception as ex:\n logging.error(\"Exception when trying to acquire project %s: %s\"\n % (project_name,ex))\n return\n if project.qc is None:\n print \"No QC for %s\" % project_name\n else:\n if project.qc.verify():\n try:\n project.qc_report()\n print \"Updated QC report for %s\" % project_name\n except Exception, ex:\n logging.error(\"import_project: failed to generate QC \"\n \"report for %s\" % project_name)", "def cnv_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, cnv_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], [], [], cnv_paths)\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def _load(self) -> None:\n mod: module.Module\n mod, _ = get_module()\n self._create_project_and_load(model=f\"import {mod.name}\")", "def vcf_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, vcf_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, vcf_paths, [], [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def 
test_import():\n import tvguide\n from tvguide import tvguide\n from tvguide import tvguide_csv\n from tvguide import TessPointing\n from tvguide import check_many\n from tvguide import check_observable", "def test_import_project():\n\n from {{ cookiecutter.project_slug }}.main import main\n\n assert main() is None", "def setup(self, projects):\n self.set_projects(projects=projects)", "def denovo_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, denovo_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], denovo_paths, [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def dimport(model_path, identifier, name, description, project_path, parent, base_model, overwrite, from_diff):\n\n project = _load_project(project_path)\n try:\n if from_diff:\n with open(model_path) as diff_file:\n df = json.load(diff_file)\n # model_path is a json diff file\n\n model = project.load_diff(df, base_model=base_model)\n else:\n model = load_model(model_path)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-1)\n\n # Check if the design already exists\n new = True\n if identifier in project.list_designs and not overwrite:\n click.echo('Error: Design {} already exists. Use --overwrite to replace'.format(identifier))\n exit(-2)\n elif identifier in project.list_designs:\n new = False\n\n if parent is not None and parent not in project.list_designs:\n click.echo('Error: Parent design {} does not exist'.format(parent))\n exit(-3)\n\n if name is None and new:\n name = click.prompt('Please enter a name for this design', type=str)\n\n if description is None and new:\n description = click.prompt('Please enter a description for this design', type=str)\n\n try:\n project.save_design(model, identifier, name=name, description=description, parent=parent, base_model=base_model,\n overwrite=overwrite)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-4)\n\n click.echo('Design successfully added to project')", "def import_fauna(survey, species_list, infile, format, verbose=None):\n start = time.time()\n now = datetime.datetime.now()\n info_msg = ' Started the import_modules of fauna information {date}'. \\\n format(date=now.strftime(\"%Y-%m-%d %H:%M\"))\n logging.info(info_msg)\n try:\n version = tools_lib.get_git_tag().strip()\n except:\n version = ''\n info_msg = ' ForestEye Collect Importer {version}'.format(version=version)\n logging.info(info_msg)\n\n if format == '2010':\n import_fauna_2010.import_fauna_2010(survey, species_list, infile)\n elif format == '2015':\n import_fauna_2015.import_fauna_2015(survey, species_list, infile)\n else:\n warn_message = \"The file format {format} is not supported\".format(format=format)\n logging.warn(warn_message)\n\n info_msg = \"The import_modules took {time:.2} seconds\".format(time=time.time() - start)\n logging.info(info_msg)\n now = datetime.datetime.now()\n info_msg = ' Finished the import_modules of fauna information dataset {date}'. 
\\\n format(date=now.strftime(\"%Y-%m-%d %H:%M\"))\n logging.info(info_msg)", "def test_01(self):\n creator = EntityCreator()\n data_source = DataSource()\n importer = Importer(creator, data_source)\n self.assertTrue(importer, 'Importer could not be instantiated')", "def import_features(slick, path, onstart=import_start, onend=import_end, delete=False):\n assert isinstance(slick, slickqa.SlickConnection)\n errors = []\n projects_dir = os.path.join(path, 'projects')\n if not os.path.exists(projects_dir):\n return errors\n features_to_import = glob.glob(os.path.join(projects_dir, '*', 'components', '*', 'features', '*.yaml'))\n features_count = len(features_to_import)\n for index, feature_yaml_path in enumerate(features_to_import):\n name = os.path.basename(feature_yaml_path)[:-5]\n features_path = os.path.dirname(feature_yaml_path)\n project_name = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(feature_yaml_path)))))\n component_name = os.path.basename(os.path.dirname(os.path.dirname(feature_yaml_path)))\n onstart('Feature', name, index, features_count)\n try:\n info = dict()\n with open(feature_yaml_path, 'r') as yaml_file:\n info = yaml.load(yaml_file)\n if 'name' not in info:\n info['name'] = name\n feature = slickqa.Feature.from_dict(info)\n\n # Add the image if it exists\n if os.path.exists(os.path.join(features_path, \"{}.png\".format(name))):\n img = slick.files.upload_local_file(os.path.join(features_path, \"{}.png\".format(name)))\n feature.img = img\n elif os.path.exists(os.path.join(features_path, \"{}.gif\".format(name))):\n img = slick.files.upload_local_file(os.path.join(features_path, \"{}.gif\".format(name)))\n feature.img = img\n elif os.path.exists(os.path.join(features_path, \"{}.jpg\".format(name))):\n img = slick.files.upload_local_file(os.path.join(features_path, \"{}.jpg\".format(name)))\n feature.img = img\n existingComponent = None\n try:\n existingComponent = slick.projects(project_name).components(component_name).get()\n except slickqa.SlickCommunicationError:\n pass\n if existingComponent is None:\n existingComponent = slickqa.Component()\n existingComponent.name = component_name\n existingComponent = slick.projects(project_name).components(existingComponent).create()\n\n existing = None\n if hasattr(existingComponent, 'features') and existingComponent.features is not None:\n for potential in existingComponent.features:\n assert isinstance(potential, slickqa.Feature)\n if potential.name == feature.name:\n feature.id = potential.id\n break\n else:\n existingComponent.features = []\n if not hasattr(feature, 'id') or feature.id is None:\n feature.id = str(bson.ObjectId())\n existingComponent.features.append(feature)\n slick.projects(project_name).components(existingComponent).update()\n if delete:\n os.unlink(feature_yaml_path)\n except:\n errors.append(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1]))\n onend('Feature', name, index, features_count)\n\n return errors", "def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)", "def test_import():\n from crank import DihedralScanner, QMEngine, PriorityQueue", "def testImport(self):\n success = False\n try:\n from cutlass import WgsAssembledSeqSet\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(WgsAssembledSeqSet is None)", "def test_researchstudy_1(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"]\n / \"researchstudy-example-ctgov-study-record.json\"\n )\n inst = 
researchstudy.ResearchStudy.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ResearchStudy\" == inst.resource_type\n\n impl_researchstudy_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ResearchStudy\" == data[\"resourceType\"]\n\n inst2 = researchstudy.ResearchStudy(**data)\n impl_researchstudy_1(inst2)", "def import_folder_as_python_project(root, project_name, folder, is_absolute=False, refer_to_omelette=False, create_test_launcher=True):\n \n path_head, project_name = os.path.split(folder)\n \n\n old_project = get_project(root.getWorkspace(), project_name)\n \n exists = old_project != None\n \n project = root.getProject(project_name);\n \n if not exists: \n \n if is_absolute:\n # http://help.eclipse.org/help32/topic/org.eclipse.platform.doc.isv/reference/api/org/eclipse/core/resources/IProjectDescription.html#setLocation(org.eclipse.core.runtime.IPath)\n # \n \n # http://cvalcarcel.wordpress.com/2009/07/26/writing-an-eclipse-plug-in-part-4-create-a-custom-project-in-eclipse-new-project-wizard-the-behavior/\n desc = project.getWorkspace().newProjectDescription(project.getName()) \n path = Path(folder) \n desc.setLocation(path)\n project.create(desc, progress_monitor);\n else: \n project.create(progress_monitor);\n \n if not project.isOpen():\n # FFFFFFFFffffff....\n opener = QuiteSafeProjectOpener(project)\n display.syncExec(opener) \n \n if not project.hasNature(PythonNature.PYTHON_NATURE_ID):\n pythonify(project)\n \n if refer_to_omelette:\n make_refer_to_omelette(project)\n \n set_src_folders(project)\n \n if create_test_launcher:\n create_launcher(project, unit_tests=True)", "def init(projectname):\n log.debug('Running init')\n get_project(projectname).init()", "def test_project_samples_import_with_history_no_collections(\n self, setup_irida, setup_galaxy, driver, tmpdir):\n irida = setup_irida\n project_name = 'ImportProjectSamples'\n project = irida.post(self.IRIDA_PROJECTS,\n json={'name': project_name})\n\n samples = self.get_href(project, 'project/samples')\n sample1 = irida.post(samples, json={'sampleName': 'PS_Sample1',\n 'sequencerSampleId': 'PS_1'})\n sequences1 = self.get_href(sample1, 'sample/sequenceFiles')\n\n # Pytest manages the temporary directory\n seq1 = tmpdir.join(\"seq1.fastq\")\n seq1.write(self.FASTQ_CONTENTS)\n sequence1 = irida.post(sequences1, files={'file': open(str(seq1),\n 'rb')})\n\n seq2 = tmpdir.join(\"seq2.fastq\")\n seq2.write(self.FASTQ_CONTENTS)\n sequence2 = irida.post(sequences1, files={'file': open(str(seq2),\n 'rb')})\n\n sample2 = irida.post(samples, json={'sampleName': 'PS_Sample2',\n 'sequencerSampleId': 'PS_2'})\n sequences2 = self.get_href(sample2, 'sample/sequenceFiles')\n seq3 = tmpdir.join(\"seq3.fastq\")\n seq3.write(self.FASTQ_CONTENTS)\n sequence3 = irida.post(sequences2, files={'file': open(str(seq3),\n 'rb')})\n\n # Export to Galaxy using the button on the dropdown menu\n driver.get(self.GALAXY_URL)\n history_panel = driver.find_element_by_id('current-history-panel')\n initially_succeeded = len(history_panel.find_elements_by_class_name(\n 'state-ok'))\n driver.find_element_by_css_selector(\"#title_getext > a > span\").click()\n driver.find_element_by_link_text(\"IRIDA\").click()\n\n # Sometimes a login is required\n try:\n self.login_irida(driver, self.IRIDA_USER, self.IRIDA_PASSWORD)\n except NoSuchElementException:\n pass\n\n # Pick the last matching project on this page\n 
driver.find_elements_by_link_text(project_name)[-1].click()\n\n # These checkbox elements cannot be clicked directly\n # Using IDs would complicate running the tests without restarting IRIDA\n action = webdriver.common.action_chains.ActionChains(driver)\n stale = True\n timeout = 0\n while stale:\n try:\n checkboxes = driver.find_elements_by_xpath(\"//table[contains(@class, 'selectable')]/tbody/tr/td[1]/input[@type='checkbox']\")\n\n checkboxes[0].click()\n checkboxes[1].click()\n\n stale = False\n except (StaleElementReferenceException, NoSuchElementException):\n time.sleep(1)\n timeout += 1\n\n if timeout == 60:\n raise\n\n driver.find_element_by_id('export-samples-btn').click()\n\n driver.find_element_by_xpath(\"//li/a[contains(@ng-click, 'toolsCtrl.galaxy')]\").click()\n\n WebDriverWait(driver, self.WAIT).until(\n EC.presence_of_element_located((By.ID, self.IRIDA_GALAXY_MODAL))\n )\n\n driver.find_element_by_id('email').clear()\n driver.find_element_by_id('email').send_keys(self.EMAIL)\n\n # true by default, so this is disabling it\n driver.find_element_by_id('makepairedcollection').click()\n\n driver.find_element_by_xpath(\"//button[contains(@ng-click, 'gCtrl.upload()')]\").click()\n\n WebDriverWait(driver, self.WAIT).until(\n EC.presence_of_element_located((By.ID, 'current-history-panel'))\n )\n time.sleep(120) # Wait for import to complete\n history_panel = driver.find_element_by_id('current-history-panel')\n succeeded = len(history_panel.find_elements_by_class_name('state-ok'))\n\n\tassert (succeeded - initially_succeeded == 4,\n \"Import did not complete successfully\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a VCF study and return the import project.
def vcf_import( root_path: pathlib.Path, study_id: str, ped_path: pathlib.Path, vcf_paths: list[pathlib.Path], gpf_instance: GPFInstance, project_config_update: Optional[dict[str, Any]] = None, project_config_overwrite: Optional[dict[str, Any]] = None ) -> ImportProject: study = StudyInputLayout(study_id, ped_path, vcf_paths, [], [], []) project = setup_import_project( root_path, study, gpf_instance, project_config_update=project_config_update, project_config_overwrite=project_config_overwrite) return project
[ "def cnv_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, cnv_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], [], [], cnv_paths)\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def setup_import_project(\n root_path: pathlib.Path, study: StudyInputLayout,\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n project_config = setup_import_project_config(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n\n # pylint: disable=import-outside-toplevel\n project = ImportProject.build_from_file(\n project_config,\n gpf_instance=gpf_instance)\n return project", "def denovo_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, denovo_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], denovo_paths, [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def get_import_file(request):\n from seed.models import obj_to_dict\n\n import_file_id = request.GET.get('import_file_id', '')\n orgs = request.user.orgs.all()\n import_file = ImportFile.objects.get(\n pk=import_file_id\n )\n d = ImportRecord.objects.filter(\n super_organization__in=orgs, pk=import_file.import_record_id\n )\n # check if user has access to the import file\n if not d.exists():\n return {\n 'status': 'success',\n 'import_file': {},\n }\n\n f = obj_to_dict(import_file)\n f['name'] = import_file.filename_only\n f['dataset'] = obj_to_dict(import_file.import_record)\n # add the importfiles for the matching select\n f['dataset']['importfiles'] = []\n files = f['dataset']['importfiles']\n for i in import_file.import_record.files:\n files.append({\n 'name': i.filename_only,\n 'id': i.pk\n })\n # make the first element in the list the current import file\n i = files.index({\n 'name': import_file.filename_only,\n 'id': import_file.pk\n })\n files[0], files[i] = files[i], files[0]\n\n return {\n 'status': 'success',\n 'import_file': f,\n }", "def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)", "def import_survey(self,sImportData,sImportDataType,sNewSurveyName=None,DestSurveyID=None):\n params = self.__format_params(locals().copy())\n method = \"import_survey\"\n r = self.call_rpc(method,params)\n return r.json()['result']", "def dimport(model_path, identifier, name, description, project_path, parent, base_model, overwrite, from_diff):\n\n project = _load_project(project_path)\n try:\n if from_diff:\n with open(model_path) as diff_file:\n df = json.load(diff_file)\n # model_path is a json diff file\n\n model = project.load_diff(df, base_model=base_model)\n else:\n model = load_model(model_path)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-1)\n\n # Check if 
the design already exists\n new = True\n if identifier in project.list_designs and not overwrite:\n click.echo('Error: Design {} already exists. Use --overwrite to replace'.format(identifier))\n exit(-2)\n elif identifier in project.list_designs:\n new = False\n\n if parent is not None and parent not in project.list_designs:\n click.echo('Error: Parent design {} does not exist'.format(parent))\n exit(-3)\n\n if name is None and new:\n name = click.prompt('Please enter a name for this design', type=str)\n\n if description is None and new:\n description = click.prompt('Please enter a description for this design', type=str)\n\n try:\n project.save_design(model, identifier, name=name, description=description, parent=parent, base_model=base_model,\n overwrite=overwrite)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-4)\n\n click.echo('Design successfully added to project')", "def importer(db):\n return Importer.objects.get(name=\"Test Importer 1\")", "def readProjectFromSVS(filepath):\n from sasdata.dataloader.readers.cansas_reader import Reader as CansasReader\n from sas.sascalc.fit.pagestate import Reader\n\n loader = Loader()\n loader.associate_file_reader('.svs', Reader)\n temp = loader.load(filepath)\n\n # CRUFT: SasView 4.x uses a callback interface to register bits of state\n state_svs = []\n def collector(state=None, datainfo=None, format=None):\n if state is not None:\n state_svs.append(state)\n state_reader = Reader(call_back=collector)\n data_svs = state_reader.read(filepath)\n\n if isinstance(temp, list) and isinstance(state_svs, list):\n output = list(zip(temp, state_svs))\n else:\n output = [(temp, state_svs)]\n return output", "def import_fauna(survey, species_list, infile, format, verbose=None):\n start = time.time()\n now = datetime.datetime.now()\n info_msg = ' Started the import_modules of fauna information {date}'. \\\n format(date=now.strftime(\"%Y-%m-%d %H:%M\"))\n logging.info(info_msg)\n try:\n version = tools_lib.get_git_tag().strip()\n except:\n version = ''\n info_msg = ' ForestEye Collect Importer {version}'.format(version=version)\n logging.info(info_msg)\n\n if format == '2010':\n import_fauna_2010.import_fauna_2010(survey, species_list, infile)\n elif format == '2015':\n import_fauna_2015.import_fauna_2015(survey, species_list, infile)\n else:\n warn_message = \"The file format {format} is not supported\".format(format=format)\n logging.warn(warn_message)\n\n info_msg = \"The import_modules took {time:.2} seconds\".format(time=time.time() - start)\n logging.info(info_msg)\n now = datetime.datetime.now()\n info_msg = ' Finished the import_modules of fauna information dataset {date}'. 
\\\n format(date=now.strftime(\"%Y-%m-%d %H:%M\"))\n logging.info(info_msg)", "def import_project(self,project_dir):\n # Check that target directory exists\n project_dir = os.path.abspath(project_dir)\n # Check that project doesn't already exist\n project_name = os.path.basename(project_dir)\n project_metadata = self.load_project_metadata()\n if project_name in [p['Project'] for p in project_metadata] or \\\n utils.AnalysisProject(project_name,\n os.path.join(self.analysis_dir,\n project_name)).exists:\n raise Exception(\"Project called '%s' already exists\" %\n project_name)\n # Load target as a project\n project = utils.AnalysisProject(project_name,project_dir)\n # Rsync the project directory\n print \"Importing project directory contents for '%s'\" % project_name\n try:\n excludes = ['--exclude=tmp.*',\n '--exclude=qc_report.*']\n rsync = applications.general.rsync(project_dir,\n self.analysis_dir,\n extra_options=excludes)\n print \"Running %s\" % rsync\n status = rsync.run_subprocess(log=self.log_path('import_project.rsync.log'))\n except Exception as ex:\n logging.error(\"Exception importing project: %s\" % ex)\n raise ex\n if status != 0:\n raise Exception(\"Failed to import project from %s (status %s)\" %\n (project_dir,status))\n # Update the projects.info metadata file\n print \"Updating projects.info file with imported project\"\n project_metadata = self.load_project_metadata()\n sample_names = [s.name for s in project.samples]\n project_metadata.add_project(project_name,\n sample_names,\n user=project.info.user,\n library_type=project.info.library_type,\n single_cell_platform=project.info.single_cell_platform,\n organism=project.info.organism,\n PI=project.info.PI,\n comments=project.info.comments)\n project_metadata.save()\n # Report\n print \"Projects now in metadata file:\"\n for p in project_metadata:\n print \"- %s\" % p['Project']\n # Update the QC report\n try:\n project = self.get_analysis_projects(pattern=project_name)[0]\n except Exception as ex:\n logging.error(\"Exception when trying to acquire project %s: %s\"\n % (project_name,ex))\n return\n if project.qc is None:\n print \"No QC for %s\" % project_name\n else:\n if project.qc.verify():\n try:\n project.qc_report()\n print \"Updated QC report for %s\" % project_name\n except Exception, ex:\n logging.error(\"import_project: failed to generate QC \"\n \"report for %s\" % project_name)", "def import_features(slick, path, onstart=import_start, onend=import_end, delete=False):\n assert isinstance(slick, slickqa.SlickConnection)\n errors = []\n projects_dir = os.path.join(path, 'projects')\n if not os.path.exists(projects_dir):\n return errors\n features_to_import = glob.glob(os.path.join(projects_dir, '*', 'components', '*', 'features', '*.yaml'))\n features_count = len(features_to_import)\n for index, feature_yaml_path in enumerate(features_to_import):\n name = os.path.basename(feature_yaml_path)[:-5]\n features_path = os.path.dirname(feature_yaml_path)\n project_name = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(feature_yaml_path)))))\n component_name = os.path.basename(os.path.dirname(os.path.dirname(feature_yaml_path)))\n onstart('Feature', name, index, features_count)\n try:\n info = dict()\n with open(feature_yaml_path, 'r') as yaml_file:\n info = yaml.load(yaml_file)\n if 'name' not in info:\n info['name'] = name\n feature = slickqa.Feature.from_dict(info)\n\n # Add the image if it exists\n if os.path.exists(os.path.join(features_path, \"{}.png\".format(name))):\n img = 
slick.files.upload_local_file(os.path.join(features_path, \"{}.png\".format(name)))\n feature.img = img\n elif os.path.exists(os.path.join(features_path, \"{}.gif\".format(name))):\n img = slick.files.upload_local_file(os.path.join(features_path, \"{}.gif\".format(name)))\n feature.img = img\n elif os.path.exists(os.path.join(features_path, \"{}.jpg\".format(name))):\n img = slick.files.upload_local_file(os.path.join(features_path, \"{}.jpg\".format(name)))\n feature.img = img\n existingComponent = None\n try:\n existingComponent = slick.projects(project_name).components(component_name).get()\n except slickqa.SlickCommunicationError:\n pass\n if existingComponent is None:\n existingComponent = slickqa.Component()\n existingComponent.name = component_name\n existingComponent = slick.projects(project_name).components(existingComponent).create()\n\n existing = None\n if hasattr(existingComponent, 'features') and existingComponent.features is not None:\n for potential in existingComponent.features:\n assert isinstance(potential, slickqa.Feature)\n if potential.name == feature.name:\n feature.id = potential.id\n break\n else:\n existingComponent.features = []\n if not hasattr(feature, 'id') or feature.id is None:\n feature.id = str(bson.ObjectId())\n existingComponent.features.append(feature)\n slick.projects(project_name).components(existingComponent).update()\n if delete:\n os.unlink(feature_yaml_path)\n except:\n errors.append(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1]))\n onend('Feature', name, index, features_count)\n\n return errors", "def load_study_from_run(run: neptune.Run):\n if run['study/storage_type'].fetch() == 'InMemoryStorage':\n return _get_pickle(path='study/study', run=run)\n else:\n return optuna.load_study(study_name=run['study/study_name'].fetch(), storage=run['study/storage_url'].fetch())", "def open(self, infile):\r\n if infile.endswith('.xml'):\r\n self.proj = None\r\n try:\r\n self.tree = ET.parse(infile)\r\n return self.tree\r\n except Exception, e:\r\n print \"Error opening file\",e\r\n usage()\r\n else:\r\n try:\r\n self.mpp = win32com.client.Dispatch(\"MSProject.Application\")\r\n self.mpp.Visible = False\r\n self.mpp.FileOpen(infile)\r\n self.proj = self.mpp.ActiveProject\r\n return self.proj\r\n except Exception, e:\r\n print \"Error opening file\",e\r\n usage()", "def read_project_file():\n filename = input(\"Project file name: \")\n with open(filename, \"r\") as file:\n project = file_line_into_tuple(file.readline())\n \n print(project)\n return project", "def importStep(fileName):\n #Now read and return the shape\n try:\n rshape = Part.read(fileName)\n\n # Extract all solids and surfaces\n geometry = []\n for solid in rshape.Solids:\n geometry.append(Shape.cast(solid))\n\n for shell in rshape.Shells:\n geometry.append(Shape.cast(shell))\n\n return cadquery.Workplane(\"XY\").newObject(geometry)\n\n except:\n raise ValueError(\"STEP File Could not be loaded\")", "def import_push_button_clicked(self):\n # get the previous version\n previous_version_id = self.previous_versions_table_widget.current_version.id\n\n from stalker import Version\n\n previous_version = Version.query.get(previous_version_id)\n\n if not self.check_version_file_exists(previous_version):\n return\n\n # logger.debug(\"importing version %s\" % previous_version)\n\n # call the environments import_ method\n if self.dcc is not None:\n # get the use namespace state\n use_namespace = self.use_namespace_check_box.isChecked()\n\n self.dcc.import_(previous_version, 
use_namespace)\n\n # inform the user about what happened\n if logger.level != logging.DEBUG:\n QtWidgets.QMessageBox.information(\n self,\n \"Import\",\n \"%s\\n\\n has been imported correctly!\" % previous_version.filename,\n QtWidgets.QMessageBox.Ok,\n )", "def agent_importer(db):\n return Importer.objects.get(name=\"Test Importer 1 Agent 1\")", "def test_do_import(self):\n user = get_user_model().objects.get(email='instructor01@bogus.com')\n wflow = Workflow.objects.get(name=self.wflow_name)\n\n with open(os.path.join(\n settings.BASE_DIR(),\n 'ontask',\n 'fixtures',\n 'survey_to_import.gz'\n ), 'rb') as file_obj:\n do_import_action(user, wflow, 'a1', file_obj)\n\n Action.objects.get(name='a1')\n self.assertTrue(check_wf_df(wflow))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a de Novo study and return the import project.
def denovo_import( root_path: pathlib.Path, study_id: str, ped_path: pathlib.Path, denovo_paths: list[pathlib.Path], gpf_instance: GPFInstance, project_config_update: Optional[dict[str, Any]] = None, project_config_overwrite: Optional[dict[str, Any]] = None ) -> ImportProject: study = StudyInputLayout(study_id, ped_path, [], denovo_paths, [], []) project = setup_import_project( root_path, study, gpf_instance, project_config_update=project_config_update, project_config_overwrite=project_config_overwrite) return project
[ "def cnv_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, cnv_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], [], [], cnv_paths)\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def setup_import_project(\n root_path: pathlib.Path, study: StudyInputLayout,\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n project_config = setup_import_project_config(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n\n # pylint: disable=import-outside-toplevel\n project = ImportProject.build_from_file(\n project_config,\n gpf_instance=gpf_instance)\n return project", "def vcf_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, vcf_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, vcf_paths, [], [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def load_study_from_run(run: neptune.Run):\n if run['study/storage_type'].fetch() == 'InMemoryStorage':\n return _get_pickle(path='study/study', run=run)\n else:\n return optuna.load_study(study_name=run['study/study_name'].fetch(), storage=run['study/storage_url'].fetch())", "def import_project(self,project_dir):\n # Check that target directory exists\n project_dir = os.path.abspath(project_dir)\n # Check that project doesn't already exist\n project_name = os.path.basename(project_dir)\n project_metadata = self.load_project_metadata()\n if project_name in [p['Project'] for p in project_metadata] or \\\n utils.AnalysisProject(project_name,\n os.path.join(self.analysis_dir,\n project_name)).exists:\n raise Exception(\"Project called '%s' already exists\" %\n project_name)\n # Load target as a project\n project = utils.AnalysisProject(project_name,project_dir)\n # Rsync the project directory\n print \"Importing project directory contents for '%s'\" % project_name\n try:\n excludes = ['--exclude=tmp.*',\n '--exclude=qc_report.*']\n rsync = applications.general.rsync(project_dir,\n self.analysis_dir,\n extra_options=excludes)\n print \"Running %s\" % rsync\n status = rsync.run_subprocess(log=self.log_path('import_project.rsync.log'))\n except Exception as ex:\n logging.error(\"Exception importing project: %s\" % ex)\n raise ex\n if status != 0:\n raise Exception(\"Failed to import project from %s (status %s)\" %\n (project_dir,status))\n # Update the projects.info metadata file\n print \"Updating projects.info file with imported project\"\n project_metadata = self.load_project_metadata()\n sample_names = [s.name for s in project.samples]\n project_metadata.add_project(project_name,\n sample_names,\n user=project.info.user,\n library_type=project.info.library_type,\n single_cell_platform=project.info.single_cell_platform,\n 
organism=project.info.organism,\n PI=project.info.PI,\n comments=project.info.comments)\n project_metadata.save()\n # Report\n print \"Projects now in metadata file:\"\n for p in project_metadata:\n print \"- %s\" % p['Project']\n # Update the QC report\n try:\n project = self.get_analysis_projects(pattern=project_name)[0]\n except Exception as ex:\n logging.error(\"Exception when trying to acquire project %s: %s\"\n % (project_name,ex))\n return\n if project.qc is None:\n print \"No QC for %s\" % project_name\n else:\n if project.qc.verify():\n try:\n project.qc_report()\n print \"Updated QC report for %s\" % project_name\n except Exception, ex:\n logging.error(\"import_project: failed to generate QC \"\n \"report for %s\" % project_name)", "def load_study(owner, title, info):\n study = load_study_from_cmd(owner, title, info)\n click.echo(\"Study successfully added to the database with id %s\"\n % study.id)", "def dimport(model_path, identifier, name, description, project_path, parent, base_model, overwrite, from_diff):\n\n project = _load_project(project_path)\n try:\n if from_diff:\n with open(model_path) as diff_file:\n df = json.load(diff_file)\n # model_path is a json diff file\n\n model = project.load_diff(df, base_model=base_model)\n else:\n model = load_model(model_path)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-1)\n\n # Check if the design already exists\n new = True\n if identifier in project.list_designs and not overwrite:\n click.echo('Error: Design {} already exists. Use --overwrite to replace'.format(identifier))\n exit(-2)\n elif identifier in project.list_designs:\n new = False\n\n if parent is not None and parent not in project.list_designs:\n click.echo('Error: Parent design {} does not exist'.format(parent))\n exit(-3)\n\n if name is None and new:\n name = click.prompt('Please enter a name for this design', type=str)\n\n if description is None and new:\n description = click.prompt('Please enter a description for this design', type=str)\n\n try:\n project.save_design(model, identifier, name=name, description=description, parent=parent, base_model=base_model,\n overwrite=overwrite)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-4)\n\n click.echo('Design successfully added to project')", "def importer(db):\n return Importer.objects.get(name=\"Test Importer 1\")", "def import_survey(self,sImportData,sImportDataType,sNewSurveyName=None,DestSurveyID=None):\n params = self.__format_params(locals().copy())\n method = \"import_survey\"\n r = self.call_rpc(method,params)\n return r.json()['result']", "def get_import_file(request):\n from seed.models import obj_to_dict\n\n import_file_id = request.GET.get('import_file_id', '')\n orgs = request.user.orgs.all()\n import_file = ImportFile.objects.get(\n pk=import_file_id\n )\n d = ImportRecord.objects.filter(\n super_organization__in=orgs, pk=import_file.import_record_id\n )\n # check if user has access to the import file\n if not d.exists():\n return {\n 'status': 'success',\n 'import_file': {},\n }\n\n f = obj_to_dict(import_file)\n f['name'] = import_file.filename_only\n f['dataset'] = obj_to_dict(import_file.import_record)\n # add the importfiles for the matching select\n f['dataset']['importfiles'] = []\n files = f['dataset']['importfiles']\n for i in import_file.import_record.files:\n files.append({\n 'name': i.filename_only,\n 'id': i.pk\n })\n # make the first element in the list the current import file\n i = 
files.index({\n 'name': import_file.filename_only,\n 'id': import_file.pk\n })\n files[0], files[i] = files[i], files[0]\n\n return {\n 'status': 'success',\n 'import_file': f,\n }", "def read_project_file():\n filename = input(\"Project file name: \")\n with open(filename, \"r\") as file:\n project = file_line_into_tuple(file.readline())\n \n print(project)\n return project", "def cmd_import(self):\n self.save()\n path = tkinter_filedialog.askopenfilename(\n initialdir=self.prefs[\"save_directory\"],\n filetypes=[(\"aeneas output ZIP file\", \".zip\"), (\"SMIL file\", \".smil\")],\n parent=self,\n title=\"Select aeneas output (SMIL or ZIP of SMILs)\"\n )\n if (path is not None) and (len(path) > 0) and (os.path.isfile(path)):\n if path.endswith(\".zip\"):\n self.import_zip_file(path)\n elif path.endswith(\".smil\"):\n self.import_smil_file(path)\n self.quit()", "def import_snapshot(self, snapshot:Snapshot):\n if snapshot.uid not in self.snapshot_ids:\n raise RuntimeError('This snapshot does not belong to the Experiment!')\n Task.init_import()\n # check out the relevant commit\n self.repo.head.reference = self.repo.commit(snapshot.commit_sha)\n self.repo.head.reset(index=True, working_tree=True)\n # import the correct file from the correct location\n backup_path = sys.path\n sys.path = [self.repo_path]\n module_name, _ = os.path.splitext(snapshot.filename)\n # the imported module triggers the other end of the mechanism\n importlib.import_module(module_name)\n # return to the original master head\n self.repo.head.reference = self.repo.heads[0]\n self.repo.head.reset(index=True, working_tree=True)\n # retrieve the imported object and clean up\n task_object = Task.retrieve_instance()\n sys.path = backup_path\n # before returning the object, link it with the Snapshot instance\n task_object.snapshot = snapshot\n return task_object", "def importStep(fileName):\n #Now read and return the shape\n try:\n rshape = Part.read(fileName)\n\n # Extract all solids and surfaces\n geometry = []\n for solid in rshape.Solids:\n geometry.append(Shape.cast(solid))\n\n for shell in rshape.Shells:\n geometry.append(Shape.cast(shell))\n\n return cadquery.Workplane(\"XY\").newObject(geometry)\n\n except:\n raise ValueError(\"STEP File Could not be loaded\")", "def test_researchstudy_1(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"]\n / \"researchstudy-example-ctgov-study-record.json\"\n )\n inst = researchstudy.ResearchStudy.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ResearchStudy\" == inst.resource_type\n\n impl_researchstudy_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ResearchStudy\" == data[\"resourceType\"]\n\n inst2 = researchstudy.ResearchStudy(**data)\n impl_researchstudy_1(inst2)", "def dsfinal2dsin(self):\n \n self.run_cmd('importInitial(\"dsfinal.txt\");')", "def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)", "def importToNewDocument(self, *args) -> \"adsk::core::Ptr< adsk::core::Document >\" :\n return _core.ImportManager_importToNewDocument(self, *args)", "def _load(self) -> None:\n mod: module.Module\n mod, _ = get_module()\n self._create_project_and_load(model=f\"import {mod.name}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a CNV study and return the import project.
def cnv_import( root_path: pathlib.Path, study_id: str, ped_path: pathlib.Path, cnv_paths: list[pathlib.Path], gpf_instance: GPFInstance, project_config_update: Optional[dict[str, Any]] = None, project_config_overwrite: Optional[dict[str, Any]] = None ) -> ImportProject: study = StudyInputLayout(study_id, ped_path, [], [], [], cnv_paths) project = setup_import_project( root_path, study, gpf_instance, project_config_update=project_config_update, project_config_overwrite=project_config_overwrite) return project
[ "def denovo_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, denovo_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], denovo_paths, [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def setup_import_project(\n root_path: pathlib.Path, study: StudyInputLayout,\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n project_config = setup_import_project_config(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n\n # pylint: disable=import-outside-toplevel\n project = ImportProject.build_from_file(\n project_config,\n gpf_instance=gpf_instance)\n return project", "def vcf_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, vcf_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, vcf_paths, [], [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def load_study_from_run(run: neptune.Run):\n if run['study/storage_type'].fetch() == 'InMemoryStorage':\n return _get_pickle(path='study/study', run=run)\n else:\n return optuna.load_study(study_name=run['study/study_name'].fetch(), storage=run['study/storage_url'].fetch())", "def import_project(self,project_dir):\n # Check that target directory exists\n project_dir = os.path.abspath(project_dir)\n # Check that project doesn't already exist\n project_name = os.path.basename(project_dir)\n project_metadata = self.load_project_metadata()\n if project_name in [p['Project'] for p in project_metadata] or \\\n utils.AnalysisProject(project_name,\n os.path.join(self.analysis_dir,\n project_name)).exists:\n raise Exception(\"Project called '%s' already exists\" %\n project_name)\n # Load target as a project\n project = utils.AnalysisProject(project_name,project_dir)\n # Rsync the project directory\n print \"Importing project directory contents for '%s'\" % project_name\n try:\n excludes = ['--exclude=tmp.*',\n '--exclude=qc_report.*']\n rsync = applications.general.rsync(project_dir,\n self.analysis_dir,\n extra_options=excludes)\n print \"Running %s\" % rsync\n status = rsync.run_subprocess(log=self.log_path('import_project.rsync.log'))\n except Exception as ex:\n logging.error(\"Exception importing project: %s\" % ex)\n raise ex\n if status != 0:\n raise Exception(\"Failed to import project from %s (status %s)\" %\n (project_dir,status))\n # Update the projects.info metadata file\n print \"Updating projects.info file with imported project\"\n project_metadata = self.load_project_metadata()\n sample_names = [s.name for s in project.samples]\n project_metadata.add_project(project_name,\n sample_names,\n user=project.info.user,\n library_type=project.info.library_type,\n single_cell_platform=project.info.single_cell_platform,\n 
organism=project.info.organism,\n PI=project.info.PI,\n comments=project.info.comments)\n project_metadata.save()\n # Report\n print \"Projects now in metadata file:\"\n for p in project_metadata:\n print \"- %s\" % p['Project']\n # Update the QC report\n try:\n project = self.get_analysis_projects(pattern=project_name)[0]\n except Exception as ex:\n logging.error(\"Exception when trying to acquire project %s: %s\"\n % (project_name,ex))\n return\n if project.qc is None:\n print \"No QC for %s\" % project_name\n else:\n if project.qc.verify():\n try:\n project.qc_report()\n print \"Updated QC report for %s\" % project_name\n except Exception, ex:\n logging.error(\"import_project: failed to generate QC \"\n \"report for %s\" % project_name)", "def load_study(owner, title, info):\n study = load_study_from_cmd(owner, title, info)\n click.echo(\"Study successfully added to the database with id %s\"\n % study.id)", "def dimport(model_path, identifier, name, description, project_path, parent, base_model, overwrite, from_diff):\n\n project = _load_project(project_path)\n try:\n if from_diff:\n with open(model_path) as diff_file:\n df = json.load(diff_file)\n # model_path is a json diff file\n\n model = project.load_diff(df, base_model=base_model)\n else:\n model = load_model(model_path)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-1)\n\n # Check if the design already exists\n new = True\n if identifier in project.list_designs and not overwrite:\n click.echo('Error: Design {} already exists. Use --overwrite to replace'.format(identifier))\n exit(-2)\n elif identifier in project.list_designs:\n new = False\n\n if parent is not None and parent not in project.list_designs:\n click.echo('Error: Parent design {} does not exist'.format(parent))\n exit(-3)\n\n if name is None and new:\n name = click.prompt('Please enter a name for this design', type=str)\n\n if description is None and new:\n description = click.prompt('Please enter a description for this design', type=str)\n\n try:\n project.save_design(model, identifier, name=name, description=description, parent=parent, base_model=base_model,\n overwrite=overwrite)\n except ValidationError as exp:\n click.echo('Validation Error with design: {}'.format(exp))\n exit(-4)\n\n click.echo('Design successfully added to project')", "def importer(db):\n return Importer.objects.get(name=\"Test Importer 1\")", "def import_survey(self,sImportData,sImportDataType,sNewSurveyName=None,DestSurveyID=None):\n params = self.__format_params(locals().copy())\n method = \"import_survey\"\n r = self.call_rpc(method,params)\n return r.json()['result']", "def get_import_file(request):\n from seed.models import obj_to_dict\n\n import_file_id = request.GET.get('import_file_id', '')\n orgs = request.user.orgs.all()\n import_file = ImportFile.objects.get(\n pk=import_file_id\n )\n d = ImportRecord.objects.filter(\n super_organization__in=orgs, pk=import_file.import_record_id\n )\n # check if user has access to the import file\n if not d.exists():\n return {\n 'status': 'success',\n 'import_file': {},\n }\n\n f = obj_to_dict(import_file)\n f['name'] = import_file.filename_only\n f['dataset'] = obj_to_dict(import_file.import_record)\n # add the importfiles for the matching select\n f['dataset']['importfiles'] = []\n files = f['dataset']['importfiles']\n for i in import_file.import_record.files:\n files.append({\n 'name': i.filename_only,\n 'id': i.pk\n })\n # make the first element in the list the current import file\n i = 
files.index({\n 'name': import_file.filename_only,\n 'id': import_file.pk\n })\n files[0], files[i] = files[i], files[0]\n\n return {\n 'status': 'success',\n 'import_file': f,\n }", "def read_project_file():\n filename = input(\"Project file name: \")\n with open(filename, \"r\") as file:\n project = file_line_into_tuple(file.readline())\n \n print(project)\n return project", "def cmd_import(self):\n self.save()\n path = tkinter_filedialog.askopenfilename(\n initialdir=self.prefs[\"save_directory\"],\n filetypes=[(\"aeneas output ZIP file\", \".zip\"), (\"SMIL file\", \".smil\")],\n parent=self,\n title=\"Select aeneas output (SMIL or ZIP of SMILs)\"\n )\n if (path is not None) and (len(path) > 0) and (os.path.isfile(path)):\n if path.endswith(\".zip\"):\n self.import_zip_file(path)\n elif path.endswith(\".smil\"):\n self.import_smil_file(path)\n self.quit()", "def import_snapshot(self, snapshot:Snapshot):\n if snapshot.uid not in self.snapshot_ids:\n raise RuntimeError('This snapshot does not belong to the Experiment!')\n Task.init_import()\n # check out the relevant commit\n self.repo.head.reference = self.repo.commit(snapshot.commit_sha)\n self.repo.head.reset(index=True, working_tree=True)\n # import the correct file from the correct location\n backup_path = sys.path\n sys.path = [self.repo_path]\n module_name, _ = os.path.splitext(snapshot.filename)\n # the imported module triggers the other end of the mechanism\n importlib.import_module(module_name)\n # return to the original master head\n self.repo.head.reference = self.repo.heads[0]\n self.repo.head.reset(index=True, working_tree=True)\n # retrieve the imported object and clean up\n task_object = Task.retrieve_instance()\n sys.path = backup_path\n # before returning the object, link it with the Snapshot instance\n task_object.snapshot = snapshot\n return task_object", "def importStep(fileName):\n #Now read and return the shape\n try:\n rshape = Part.read(fileName)\n\n # Extract all solids and surfaces\n geometry = []\n for solid in rshape.Solids:\n geometry.append(Shape.cast(solid))\n\n for shell in rshape.Shells:\n geometry.append(Shape.cast(shell))\n\n return cadquery.Workplane(\"XY\").newObject(geometry)\n\n except:\n raise ValueError(\"STEP File Could not be loaded\")", "def test_researchstudy_1(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"]\n / \"researchstudy-example-ctgov-study-record.json\"\n )\n inst = researchstudy.ResearchStudy.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ResearchStudy\" == inst.resource_type\n\n impl_researchstudy_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ResearchStudy\" == data[\"resourceType\"]\n\n inst2 = researchstudy.ResearchStudy(**data)\n impl_researchstudy_1(inst2)", "def dsfinal2dsin(self):\n \n self.run_cmd('importInitial(\"dsfinal.txt\");')", "def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)", "def importToNewDocument(self, *args) -> \"adsk::core::Ptr< adsk::core::Document >\" :\n return _core.ImportManager_importToNewDocument(self, *args)", "def _load(self) -> None:\n mod: module.Module\n mod, _ = get_module()\n self._create_project_and_load(model=f\"import {mod.name}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and register a dataset named dataset_id from the given studies.
def setup_dataset( dataset_id: str, gpf_instance: GPFInstance, *studies: GenotypeData, dataset_config_udate: str = "") -> GenotypeData: # pylint: disable=import-outside-toplevel from box import Box from dae.studies.study import GenotypeDataGroup dataset_config = { "id": dataset_id } if dataset_config_udate: config_update = yaml.safe_load(dataset_config_udate) dataset_config.update(config_update) dataset = GenotypeDataGroup( Box(dataset_config, default_box=True), studies) # pylint: disable=protected-access gpf_instance._variants_db.register_genotype_data(dataset) return dataset
[ "def _create_dataset(self, dataset_id: str) -> None:\n\n self.assert_gcp_dependencies()\n dataset = bigquery.Dataset(f\"{self.project_id}.{dataset_id}\")\n dataset.location = self.data_location\n self.bigquery_client.create_dataset(dataset, exists_ok=True)\n logging.info(f\"Created dataset with name: {dataset_id}\")", "def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)", "def _create_dataset_if_necessary(client, dataset_id):\n dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)\n try:\n dataset = client.get_dataset(dataset_reference)\n return\n except NotFound:\n pass\n dataset = bigquery.Dataset(dataset_reference)\n dataset.location = client.location\n print(f\"Creating dataset: {dataset_id}\")\n dataset = client.create_dataset(dataset)", "def insert_dataset(description):\n db = tkp.db.Database()\n dataset = alchemy_insert_dataset(db.session, description)\n db.session.add(dataset)\n db.session.commit()\n dataset_id = dataset.id\n return dataset_id", "def dataset_create(self, **kwargs):\n print(\"Creating RENKU dataset...\")\n opts = {\n \"dataset_name\": \"Dataset name\"\n }\n for key, val in opts.items():\n if key not in kwargs.keys():\n if key in self.__dict__.keys():\n kwargs[key] = self.__dict__[key]\n else:\n kwargs[key] = input(val + \": \")\n\n cmd = Command([self.renku_cli,\n 'dataset',\n 'create',\n kwargs[\"dataset_name\"]\n ]\n )\n print(cmd.stdout.read().decode() + cmd.stderr.read().decode())\n return self.__get_dataset_metadata(kwargs[\"dataset_name\"])", "def create_dataset(datasetName=None, actions=None, triggers=None, contentDeliveryRules=None, retentionPeriod=None, versioningConfiguration=None, tags=None):\n pass", "def create_dataset(body: Dataset):\n # TODO: Create dataset entry into database\n return dict(**body.dict(), id=1)", "def create(ds_name, description, tsuid_list):\n\n tdm = TemporalDataMgr()\n return tdm.import_data_set(data_set_id=ds_name, description=description, tsuid_list=tsuid_list)", "def store_dataset(group, name, obj):\n dset = group.create_dataset(name, **obj.kwds)\n update_attrs(dset, obj.attrs)", "def add_dataset(self, project_id, filename=None, label=None):\n return self.query(\"\"\"\n mutation addDatasetMutation($dataset: AddDatasetInput!) 
{\n addDataset(input: $dataset) {\n dataset {\n id\n label\n project {\n id\n }\n createdBy {\n id\n }\n locatorDict\n organization {\n id\n }\n }\n }\n }\n \"\"\",\n variables={\n 'dataset': {\n 'locatorDict': json.dumps({'filename': filename}) if filename else json.dumps({}),\n 'projectId': project_id,\n 'label': label\n }\n }\n )", "def get_dataset_id(self, dataset):\n raise NotImplementedError", "def create_dataset(cls, **kwargs):\n data = {\n 'dataset_name': 'test_dataset',\n 'group_name': 'test_group',\n 'method': 'prebuilt',\n 'prebuilt_train_images': os.path.join(cls.imageset_folder, 'train_images'),\n 'prebuilt_train_labels': os.path.join(cls.imageset_folder, 'train_labels'),\n 'prebuilt_val_images': os.path.join(cls.imageset_folder, 'val_images'),\n 'prebuilt_val_labels': os.path.join(cls.imageset_folder, 'val_labels'),\n 'prebuilt_mean_file': os.path.join(cls.imageset_folder, 'train_mean.binaryproto'),\n }\n data.update(kwargs)\n\n request_json = data.pop('json', False)\n url = '/datasets/images/generic'\n if request_json:\n url += '.json'\n\n rv = cls.app.post(url, data=data)\n\n if request_json:\n if rv.status_code != 200:\n print json.loads(rv.data)\n raise RuntimeError('Model creation failed with %s' % rv.status_code)\n return json.loads(rv.data)['id']\n\n # expect a redirect\n if not 300 <= rv.status_code <= 310:\n s = BeautifulSoup(rv.data, 'html.parser')\n div = s.select('div.alert-danger')\n if div:\n print div[0]\n else:\n print rv.data\n raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)\n\n job_id = cls.job_id_from_response(rv)\n\n assert cls.dataset_exists(job_id), 'dataset not found after successful creation'\n\n cls.created_datasets.append(job_id)\n return job_id", "def createDataset(filename, group, dataset, data):\n\n deleteDataset(filename, group, dataset)\n\n FILE = h5py.File(filename, \"r+\")\n\n GROUP = FILE[group]\n\n GROUP.create_dataset(dataset, data = data)\n\n print(\"[CREATE]: <{:s}> dataset in <{:s}> group created.\".format(dataset, group))\n\n FILE.close()", "def load_sample_dataset(dataset_id=\"d123\") -> Dataset:\n return load_dataset_from_attributes(\n dataset_id, load_sampledata_json(f\"{dataset_id}.json\")\n )", "def add_dataset(self, **kwargs) -> None:\n dataset = XLDataset(**kwargs)\n\n if dataset.split == \"training\":\n self.training.append(dataset)\n elif dataset.split == \"validation\":\n self.validation.append(dataset)\n elif dataset.split == \"test\":\n self.test.append(dataset)\n else:\n raise ValueError(f\"Unknown value for 'split' in \"\n \"{dataset.pxid}.\")", "def convert_dataset(self):\n self.create_dataset_specification_and_records()\n\n # Write the DatasetSpecification to the designated location.\n self.write_data_spec()", "def append(self, dataset, identifier):\n\n if isinstance(dataset, str):\n dataset = self._dataset_class(dataset_path=dataset)\n\n if not isinstance(dataset, self._dataset_class):\n raise CompatibilityException('Incompatible dataset. 
'\n 'You can only add instances of '\n 'type {}'.format(self._dataset_class))\n\n if len(dataset.description)>0:\n identifier = dataset.description\n\n if not self._is_init:\n self._ids = set(dataset.samplet_ids)\n self.targets = dataset.targets\n self._target_sizes = dataset.target_sizes\n\n self.num_samplets = len(self._ids)\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n\n # maintaining a no-data pyradigm Dataset internally to reuse its methods\n self._dataset = copy(dataset)\n # replacing its data with zeros\n self._dataset.data = {id_: np.zeros(1) for id_ in self._ids}\n\n if hasattr(dataset, 'attr'):\n self._common_attr = dataset.attr\n self._common_attr_dtype = dataset.attr_dtype\n else:\n self._common_attr = dict()\n self._common_attr_dtype = dict()\n\n self._attr = dict()\n\n self._is_init = True\n else:\n # this also checks for the size (num_samplets)\n if set(dataset.samplet_ids) != self._ids:\n raise CompatibilityException(\n 'Differing set of IDs in two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if dataset.targets != self.targets:\n raise CompatibilityException(\n 'Targets for some IDs differ in the two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if identifier not in self._modalities:\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n else:\n raise KeyError('{} already exists in MultiDataset'\n ''.format(identifier))\n\n if hasattr(dataset, 'attr'):\n if len(self._common_attr) < 1:\n # no attributes were set at all - simple copy sufficient\n self._common_attr = dataset.attr.copy()\n self._common_attr_dtype = dataset.attr_dtype.copy()\n else:\n for a_name in dataset.attr:\n if a_name not in self._common_attr:\n self._common_attr[a_name] = dataset.attr[a_name]\n self._common_attr_dtype[a_name] = \\\n dataset.attr_dtype[a_name]\n elif self._common_attr[a_name] != dataset.attr[a_name]:\n raise ValueError(\n 'Values and/or IDs differ for attribute {}. '\n 'Ensure all datasets have common attributes '\n 'with the same values'.format(a_name))\n\n\n # each addition should be counted, if successful\n self.modality_count += 1", "def add_dataset_spec(self, datasource_spec_id, name, description=None, tags=[]):\n return self.query(\"\"\"\n mutation addDatasetSpecMutation($datasetSpec: AddDatasetSpecInput!) {\n addDatasetSpec(input: $datasetSpec) {\n datasetSpec {\n id\n datasourceSpecId\n name\n description\n tags\n }\n }\n }\n \"\"\",\n variables={\n \"datasetSpec\": {\n \"datasourceSpecId\": datasource_spec_id,\n \"name\": name,\n \"description\": description,\n \"tags\": tags\n }\n })", "def create_inference_dataset(waveform_dataset, settings):\n \n \n dataset = _create_spectrogram_dataset(waveform_dataset, settings)\n \n dataset = dataset.map(\n _diddle_inference_example,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n return dataset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a base32-encoded secret string to bytes.
def secret_to_bytes(secret): return base64.b32decode(secret)
[ "def secret_data_encode_bytes(data: bytes) -> bytes:\n return base64.b64encode(data)", "def encode_to_b16(inp: str) -> bytes:\n encoded = inp.encode(\"utf-8\") # encoded the input (we need a bytes like object)\n b16encoded = base64.b16encode(encoded) # b16encoded the encoded string\n return b16encoded", "def unpibble32(text: str) -> bytes:\n encoded: bytes = bytes(text, \"ascii\")\n table: bytes = bytes.maketrans(\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n )\n return base64.b32decode(encoded.translate(table))", "def secret_data_encode(data: Union[bytes, str]) -> str:\n if isinstance(data, str):\n data = data.encode('utf-8')\n return secret_data_encode_bytes(data).decode(\"utf-8\")", "def pibble32(data: bytes) -> str:\n table: bytes = bytes.maketrans(\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n )\n encoded: bytes = base64.b32encode(data)\n return str(encoded.translate(table), \"ascii\")", "def Decode(cls, base32_string):\n result = []\n for c in base32_string:\n result.append('{0:05b}'.format(cls.BASE32_REVERSED[c.upper()]))\n return ''.join(result)", "def bytes_key(string):\n return key_to_bytes(key(string))", "def Encode(cls, binary_string):\n assert cls.GetPaddingLength(len(binary_string)) == 0\n result = []\n for i in range(0, len(binary_string), cls.BASE32_BIT_WIDTH):\n result.append(cls.BASE32_ALPHABET[\n int(binary_string[i:i + cls.BASE32_BIT_WIDTH], 2)])\n return ''.join(result)", "def test_str(self):\n key = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n expected = str(binascii.hexlify(self.bytes_a))\n observed = str(key)\n self.assertEqual(expected, observed)", "def string_to_bytes(text):\n return bytes([ord(c) for c in text])", "def human_bytes(byte_string):\n return base64.b32encode(byte_string).strip('=').lower()", "def string_to_binary(str):\n return bin(int(binascii.hexlify(str.encode()), 16))[2:]", "def base58_decode(encoded: str) -> bytes:\n int_v = 0\n for char in encoded:\n int_v = int_v * 58 + _BASE58_ALPHA_IDXS[char]\n return int_v.to_bytes((int_v.bit_length() - 1) // 8 + 1, byteorder='big')[1:]", "def hex2bytes(x):\n if type(x) != type(b'h'):\n x = bytes(x)\n if len(x) % 2 == 1:\n x = b'0' + x\n alph = b'0123456789abcdef'\n return bytes([16*alph.index(x[2*i]) + alph.index(x[2*i+1]) for i in range(int(len(x)/2))])", "def make_totp_secret():\n return pyotp.random_base32()", "def _decode_base58(s: str, length: int) -> bytes:\n decoded = 0\n multi = 1\n s = s[::-1]\n for char in s:\n decoded += multi * BASE58_ALPHABET.index(char)\n multi = multi * 58\n\n return decoded.to_bytes(length, byteorder='big')", "def getSecretKey(self) -> bytes:\r\n return self.secretKey", "def ascii2binary(s):\n #return bin(int.from_bytes(s.encode(), 'big'))[2:] # Doesn't account for padding\n b, buff = \"\", \"\"\n for c in s:\n buff = bin(ord(c))[2:]\n while len(buff) % 8 != 0:\n buff = \"0\" + buff\n b += buff\n return b", "def tobinhex(data):\n\n s_data = unicode(data).encode('utf-8')\n hex_data = base64.b16encode(s_data)\n l_binhex=[]\n\n for i in range(len(hex_data))[::2]:\n l_binhex.append(hex_data[i:i+2])\n\n return '\"%s\"' % ' '.join(l_binhex).upper()", "def encrypt(data: str, secret_text: bytes) -> Tuple[str, str, str]:\n cipher = AES.new(secret_text, AES.MODE_EAX)\n ciphertext, tag = cipher.encrypt_and_digest((data.encode(\"utf-8\")))\n return (base64.b64encode(ciphertext).decode(\"ascii\"),\n base64.b64encode(tag).decode(\"ascii\"),\n 
base64.b64encode(cipher.nonce).decode(\"ascii\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list l2 and a list of indices l1, yield all tuples (x, y) s.t. x is an index in l1 and y is l2[x].
def special_product(l1, l2):
    for i in l1:
        for j in range(0, len(l2[i])):
            yield (i, j)
[ "def indexpositions(list1,int1):\n list2=[]\n for i in range(len(list1)):\n if int1==list1[i]:\n list2.append(i)\n return list2", "def cartesian( v1, v2 ):\n return tuple([(x,y) for x in v1 for y in v2])", "def _get_pair_list(queries, docs, labels, _make_indexed):\n while True:\n j=0\n for q, doc, label in zip(queries, docs, labels):\n doc, label = (list(t) for t in zip(*sorted(zip(doc, label), reverse=True)))\n for item in zip(doc, label):\n if item[1] == 1:\n for new_item in zip(doc, label):\n if new_item[1] == 0:\n j+=1\n yield(_make_indexed(q), _make_indexed(item[0]), _make_indexed(new_item[0]))", "def idx_zip(a: List[Tuple[int, Any]],\n b: List[Tuple[int, Any]],\n c: List[Tuple[int, Any]] = None) -> Union[\n List[Tuple[int, Any, Any]], List[Tuple[int, Any, Any, Any]], None]:\n idx_a = [i for i, _ in a]\n idx_b = [i for i, _ in b]\n\n if not idx_a.sort() == idx_b.sort():\n return None\n\n a.sort(key = lambda e: e[0])\n b.sort(key = lambda e: e[0])\n\n zipped_res = zip(a, b)\n\n if c is not None:\n idx_c = [i for i, _ in c]\n if not idx_c.sort() == idx_a.sort():\n return None\n\n c.sort(key=lambda e: e[0])\n zipped_res = zip(zipped_res, c)\n\n return [(i, a_i, b_i, c_i) for ((i, a_i), (_, b_i)), (_, c_i) in zipped_res]\n\n return [(i, a_i, b_i) for (i, a_i), (_, b_i) in zipped_res]", "def inlj(outer, index):\n for row in outer:\n for irow in index.get(row[0], []):\n yield (row, irow)", "def pair_iter(mat1, mat2):\n\n assert_same_size(mat1, mat2)\n \n for (x, y), our_cell in mat1:\n other_cell = mat2.get_cell(x, y)\n yield (x, y), (our_cell, other_cell)", "def returnIteratorIndexesFromIndex(cls, listOfIndexes):", "def split_indices(l,lookup):\n within,without = [],[]\n for (i,v) in enumerate(l):\n try:\n ind = lookup.index(v)\n within.append((i,ind))\n except ValueError: # v not found in lookup\n without.append((i,v))\n return within,without", "def cartesian(lst1, lst2):\r\n list3 = []\r\n for i in range(len(lst1)):\r\n for j in range(len(lst2)):\r\n list3.append([lst1[i],lst2[j]]) #add in a loop each component\r\n #within lst1 to each component in lst2\r\n return list3", "def find_similar_pairs(self, lsh, ids):\n id_list = list(ids)\n if len(id_list) < 2:\n return\n for id1 in id_list:\n for id2 in id_list:\n if id1 < id2:\n yield (id1, id2), 1", "def pairs(l):\n for i in range(int(len(l) / 2)):\n yield l[2*i], l[2*i+1]", "def mult_tuple(tuple1, tuple2):\n res = [(b, a) for a in tuple1 for b in tuple2] + [\"end\"]\n res = res + [(b, a) for a in tuple1 for b in tuple2]\n #2 methods- second from course notes\n list = []\n for a in range(len(tuple1)):\n for b in range(len(tuple2)):\n list = list , (tuple1[a],tuple2[b]) , (tuple2[b],tuple1[a])\n \n print(tuple(res))\n print(list)\n\n return None", "def pairs(\n x_coordinates: Iterable[float], y_coordinates: Iterable[float]\n) -> tuple[tuple[float, float], ...]:\n pairs = tuple(zip(x_coordinates, y_coordinates))\n return pairs", "def merge_indexes(self, match_index_pairs):\n\n def overlapping(index_a, index_b):\n if index_a.end > index_b.start and index_a.start < index_b.end:\n return True\n if index_a.start < index_b.end and index_b.start < index_a.end:\n return True\n if index_a.start < index_b.start and index_a.end > index_b.end:\n return True\n if index_b.start < index_a.start and index_b.end > index_a.end:\n return True\n\n def merge_pairs(index_a, index_b):\n start = 0\n if index_a.start < index_b.start:\n start = index_a.start\n else:\n start = index_b.start\n if index_a.end < index_b.end:\n end = index_b.end\n else:\n end = 
index_a.end\n return StartStopIndex(start, end, [index_a.value, index_b.value])\n\n for pair in match_index_pairs:\n overlap = False\n match_index_pairs.remove(pair)\n for check_pair in match_index_pairs:\n if overlapping(pair, check_pair):\n overlap = True\n match_index_pairs.remove(check_pair)\n match_index_pairs.append(merge_pairs(pair, check_pair))\n break\n if not overlap:\n match_index_pairs.append(pair)", "def double_range(limit1, limit2): #y - x\n for i1 in range(limit1):\n for i2 in range(limit2):\n yield i1, i2", "def _idx2coord(indices: Iterable) -> tuple:\n return tuple([int(x1) - x2 for x1, x2 in zip(indices, center_offset)])", "def select_vector(x, y):\n\n if len(x) != len(y):\n raise ValueError(\"The two lists must contain the same number of elements\")\n \n returned_list = []\n \n for i, index in enumerate(y):\n \n if index not in range(-len(x[i]), len(x[i])):\n raise ValueError(\" At least one of the indices is out of bounds\")\n \n returned_list.append(x[i][index])\n \n return returned_list", "def associate(first_list, second_list, offset=0, max_difference=0.02):\n potential_matches = [(abs(float(a[0]) - (float(b[0]) + offset)), ia, ib) # a[0] and b[0] extract the first element which is a timestamp \n for ia,a in enumerate(first_list) #for counter, value in enumerate(some_list)\n for ib,b in enumerate(second_list)\n if abs(float(a[0]) - (float(b[0]) + offset)) < max_difference]\n potential_matches.sort()\n matches = []\n first_flag = [False]*len(first_list)\n second_flag = [False]*len(second_list)\n for diff, ia, ib in potential_matches:\n if first_flag[ia] is False and second_flag[ib] is False:\n #first_list.remove(a)\n first_flag[ia] = True\n #second_list.remove(b)\n second_flag[ib] = True \n matches.append((ia, ib, diff)) \n matches.sort()\n return matches", "def cartesian_product(a,b):\n return [(x,y) for x in a for y in b ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the x position of the p'th word as if given an infinite environment.
def env_pos_x_virtual(self, p): return self.TEXT_MARGIN[0] + self.TEXT_SPACING[0] * p
[ "def xcoord(pt):\n return pt.x", "def get_pos_x(self):\n return self._position[0]", "def current_x(self):\n return self._current_position[0]", "def get_x(self):\r\n return self.get_3d_position()[\"position\"].x", "def get_x(self):\n\n return math.floor(self.position.x)", "def xposition(self):\n return self._xposition", "def _xdist(self):\n\t\treturn self.geom.x - self.last.x", "def hole_offset_x(self):\n self._hole_offset_x = self._hole_params[3].ToString()\n return self._hole_offset_x", "def _get_x_position_title(self, surface):\n return self._get_x_center_position_title() - surface.get_width() / 2", "def x(self):\n return self.center[0]", "def find_x(self, y):\n return (y-self.b)/self.m", "def Word_Point(word):\r\n if len(word) == 0:\r\n return None\r\n vowels = 'aeiouyAEIOUY'\r\n center = len(word)/2\r\n pattern = []\r\n i = 0\r\n while i<center:\r\n pattern = [i, -i]+pattern\r\n i+=1\r\n #print pattern\r\n for i in pattern:\r\n if word[i] in vowels:\r\n i_abs = i%len(word)\r\n return i_abs\r\n return center", "def min_x(self):\n return self.origin[0]", "def _get_x_center_position_title(self):\n return self.width / 2", "def getoriginx(self):\n return self.origin[0]", "def OriginX(self) -> float:", "def get_start(self) -> int:\n return self.__pos_x", "def get_xcoord(self, x):\n return (x - self.xlimits[0]) / self.dx", "def generate_pos(self, s):\n if len(self._interp_fcns) == 0:\n return None\n idx = self.get_segment_idx(s)\n if idx == 0:\n u_k = 0\n pos = self._interp_fcns[idx].interpolate(u_k)\n else:\n u_k = (s - self._s[idx - 1]) / (self._s[idx] - self._s[idx - 1])\n pos = self._interp_fcns[idx - 1].interpolate(u_k)\n return pos" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the ordered list of amino acids. This convention is based on the BLOSUM matrix in biopython and is assumed for the binned distribution representing amino acid contributions to differential selection at a site.
def _get_aa_ordered_list(): return list("ARNDCQEGHILKMFPSTWYV")
[ "def iterAACombs(n,alfabet): \n AAs = alfabet\n AAcombsList = []\n for i in xrange(2,n+1):\n for combs in itertools.combinations_with_replacement(AAs,i): #itertools.product(AAs, repeat=i): \n yield ''.join(sorted(combs))", "def aids(self):\n return self._aid_name_to_value.values()", "def get_acs_for_protein_seq(self, seq):\n md5 = seq_md5(seq)\n return [r['ac'] for r in self._fetchall(self._queries['acs_for_protein_md5'], [md5])] + ['MD5_' + md5]", "def define_proteinogenic_aas():\n global aas\n aas = list(string.ascii_uppercase)\n for no_aa in [\"B\", \"J\", \"O\", \"U\", \"X\", \"Z\"]:\n aas.remove(no_aa)", "def aids(self):\n return self._aids", "def generateBasisSetOrders(self):\n\t\torderList = []\n\t\tL = self.L\n\t\ti = 0\n\t\tbasisSetOrder = infor.getBasisSetOrder()\n\t\tif basisSetOrder == \"libint\":\n\t\t\twhile i <= L:\n\t\t\t\tnx = L - i\n\t\t\t\tj = 0\n\t\t\t\twhile j<=i: \n\t\t\t\t\tny = i-j\n\t\t\t\t\tnz = j\n\t\t\t\t\torderList.append(nx)\n\t\t\t\t\torderList.append(ny)\n\t\t\t\t\torderList.append(nz)\n\t\t\t\t\tj = j + 1\n\t\t\t\ti = i + 1\n else:\n\t\t\tprint \"Unrecognized basis set ordering to generate basis sets\\n\"\n\t\t\tsys.exit()\n\n\t\treturn orderList", "def get_aa(codons, genetic_code):\n\n # if there are no codons\n if codons==\"-\": return \"-\"\n\n # if it is a protein\n elif len(codons)%3==0: return str(Seq(codons).translate(table = genetic_code))\n\n # if not\n else: \n if len(codons)<3: return \"X\"\n else: return (str(Seq(\"\".join(list(chunks(codons, 3))[0:-1])).translate(table = genetic_code)) + \"X\")", "def create_full_amino_acid_build_dict(atom_name_dict, bond_angle_dict):\n AMINO_ACID_INFO = {}\n for AA, build_order_chains in BUILD_ORDER_CHAINS.items():\n AMINO_ACID_INFO[AA] = {}\n # Add bonds\n bonds_names = []\n bonds_types = []\n bond_lens = []\n AMINO_ACID_INFO[AA][\"atom-names\"] = set()\n for i, chain in enumerate(build_order_chains):\n # This corresponds to a normal chain, beginning with a CA\n if i == 0:\n prev_bond_atom = \"CA\"\n else:\n j = 0\n while build_order_chains[0][j] == build_order_chains[1][j]:\n j += 1\n prev_bond_atom = chain[j-1]\n chain = chain[j:]\n for atom_name in chain:\n AMINO_ACID_INFO[AA][\"atom-names\"].add(atom_name)\n cur_bond = [prev_bond_atom, atom_name]\n cur_bond_names = \"-\".join(cur_bond)\n bonds_names.append(cur_bond_names)\n cur_bond_types = \"-\".join([atom_name_dict[AA][an] for an in cur_bond])\n bonds_types.append(cur_bond_types)\n\n try:\n bond_lens.append(bond_angle_dict[\"bonds\"][cur_bond_types])\n except KeyError:\n try:\n cur_bond_types = \"-\".join(cur_bond_types.split(\"-\")[::-1])\n bond_lens.append(bond_angle_dict[\"bonds\"][cur_bond_types])\n except KeyError:\n bond_lens.append(\"?\")\n prev_bond_atom = atom_name\n AMINO_ACID_INFO[AA][\"bonds-names\"] = bonds_names\n AMINO_ACID_INFO[AA][\"bonds-types\"] = bonds_types\n AMINO_ACID_INFO[AA][\"bonds-vals\"] = bond_lens\n AMINO_ACID_INFO[AA][\"atom-names\"] = [b.split(\"-\")[1] for b in AMINO_ACID_INFO[AA][\"bonds-names\"]]\n\n angles_names = []\n angles_types = []\n angles_vals = []\n for i, chain in enumerate(build_order_chains):\n prev_2_atoms = [\"N\", \"CA\"]\n if i == 1:\n j = 0\n cur_angles = [*prev_2_atoms, chain[j]]\n while \"-\".join(cur_angles) in angles_names and j < len(chain)-1:\n prev_2_atoms = [prev_2_atoms[1], chain[j]]\n cur_angles = [*prev_2_atoms, chain[j+1]]\n j += 1\n chain = chain[j:]\n\n\n for atom_name in chain:\n cur_angles = [*prev_2_atoms, atom_name]\n cur_angles_names = \"-\".join(cur_angles)\n 
angles_names.append(cur_angles_names)\n cur_angles_types = \"-\".join([atom_name_dict[AA][an] for an in cur_angles])\n angles_types.append(cur_angles_types)\n try:\n angles_vals.append(bond_angle_dict[\"angles\"][cur_angles_types])\n except KeyError:\n try:\n cur_angles_types = \"-\".join(cur_angles_types.split(\"-\")[::-1])\n angles_vals.append(bond_angle_dict[\"angles\"][cur_angles_types])\n except KeyError:\n angles_vals.append(\"?\")\n prev_2_atoms = [prev_2_atoms[-1], atom_name]\n AMINO_ACID_INFO[AA][\"angles-names\"] = angles_names\n AMINO_ACID_INFO[AA][\"angles-types\"] = angles_types\n AMINO_ACID_INFO[AA][\"angles-vals\"] = [np.radians(x) if x != \"?\" else x for x in angles_vals]\n\n torsion_names = []\n torsion_types = []\n torsion_vals = []\n for i, chain in enumerate(build_order_chains):\n prev_3_atoms = [\"C\", \"N\", \"CA\"]\n if i == 1:\n j = 0\n cur_torsion = [*prev_3_atoms, chain[j]]\n while \"-\".join(cur_torsion) in torsion_names and j < len(chain) - 1:\n prev_3_atoms = [prev_3_atoms[-2], prev_3_atoms[-1], chain[j]]\n cur_torsion = [*prev_3_atoms, chain[j + 1]]\n j += 1\n chain = chain[j:]\n for atom_name in chain:\n cur_torsion = [*prev_3_atoms, atom_name]\n cur_torsion_names = \"-\".join(cur_torsion)\n torsion_names.append(cur_torsion_names)\n cur_torsion_types = \"-\".join(\n [atom_name_dict[AA][an] for an in cur_torsion])\n torsion_types.append(cur_torsion_types)\n # Add planar torsion angles\n if AA in KNOWN_TORSION_VALS and cur_torsion_names in KNOWN_TORSION_VALS[AA]:\n torsion_vals.append(KNOWN_TORSION_VALS[AA][cur_torsion_names])\n # Mark certain torsion angles as inferred\n elif (AA == \"ARG\" and cur_torsion_names == 'CD-NE-CZ-NH2') or (\n AA == \"ASN\" and cur_torsion_names == 'CA-CB-CG-ND2') or (\n AA == \"ASP\" and cur_torsion_names == 'CA-CB-CG-OD2') or (\n AA == \"GLN\" and cur_torsion_names == 'CB-CG-CD-NE2') or (\n AA == \"GLU\" and cur_torsion_names == 'CB-CG-CD-OE2'):\n torsion_vals.append(\"i\")\n else:\n torsion_vals.append(\"p\")\n prev_3_atoms = [prev_3_atoms[-2], prev_3_atoms[-1], atom_name]\n AMINO_ACID_INFO[AA][\"torsion-names\"] = torsion_names\n AMINO_ACID_INFO[AA][\"torsion-types\"] = torsion_types\n AMINO_ACID_INFO[AA][\"torsion-vals\"] = [np.radians(x) if x not in [\"p\", \"i\"] else x for x in torsion_vals]\n\n return AMINO_ACID_INFO", "def sortedListOfIndexAssyRec(self):\n l = []\n for ar in self:\n l.append((ar.orgpos, ar))\n # sort\n l.sort(key=lambda k: k[0])\n # done\n return l", "def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)", "def ADNProfile(self):\n sequences = self.__FASTA_sequencesToMatrix()\n length = len(sequences[0])\n\n nucleotides = {\"A\": [0]*length, \"T\": [0] *\n length, \"C\": [0]*length, \"G\": [0]*length}\n\n for j in range(length):\n\n for i in range(len(sequences)):\n # increment nucleotide vertically\n # j lenth of cols of list profile & matrix\n nucleotides[sequences[i][j]][j] += 1\n # sequences Content matrix of nucleotide\n # [A,A,C,T,G]\n # [C,G,G,A,A]\n # [A,T,A,T,T]\n # [G,C,G,T,A]\n\n return [nucleotides, length]", "def 
_get_all_aids(ibs):\n all_aids = ibs.db.get_all_rowids(ANNOTATION_TABLE)\n return all_aids", "def order(self):\n\n return xroms.order(self.da)", "def get_encounter_aids(ibs, eid_list):\n gids_list = ibs.get_encounter_gids(eid_list)\n aids_list_ = ibsfuncs.unflat_map(ibs.get_image_aids, gids_list)\n aids_list = list(map(utool.flatten, aids_list_))\n #print('get_encounter_aids')\n #print('eid_list = %r' % (eid_list,))\n #print('gids_list = %r' % (gids_list,))\n #print('aids_list_ = %r' % (aids_list_,))\n #print('aids_list = %r' % (aids_list,))\n return aids_list", "def get_abreviations(self):\n _ = AminoAcid.objects.filter(amino_acid = self.first)[0]\n first_abbreviation = DataAminoAcids.objects.filter(name = _).all()[0].first_abbreviation\n linear_abbreviations_qs = AminoAcid.objects.filter(amino_acid__in=self.linear)\\\n .values_list('data__linear_abbreviation')\n methylated_abbreviations_qs = AminoAcid.objects.filter(amino_acid__in=self.methylated)\\\n .values_list('data__methylated_abbreviation')\n linear_abbreviations = list(map(lambda x: x[0], linear_abbreviations_qs))\n methylated_abbreviations = list(map(lambda x: x[0], methylated_abbreviations_qs))\n abbreviations = linear_abbreviations + methylated_abbreviations\n return first_abbreviation, abbreviations", "def _count_aa(self):\n\t\ttable = {}\n\t\tfor codon in self.codon_table:\n\t\t\taa = codon_to_aa[codon]\n\t\t\tif aa not in table: # check to see if the amino acid has already been added to the table\n\t\t\t\ttable[aa] = int(self.codon_table[codon]) # if so, update the existing count\n\t\t\telse: # if the amino acid hasn't been added to the table\n\t\t\t\ttable[aa] += int(self.codon_table[codon]) # add the amino acid to the table and assign a count\n\t\treturn table", "def encode_DNA(seq):\n\tseq2bin_dict = {'A':[0,0], 'C':[0,1], 'G':[1,0], 'T':[1,1]}\n\treturn np.array(sum([seq2bin_dict.get(nuc) for nuc in seq], []))", "def ncbi_GetSeqsFromAcc(self, table_name, column_name='acc_id'):\n\n print(\n \"\"\"\n #########################################################\\n\n ############ NCBI ncbi accession to fasta #############\\n\n #########################################################\\n\n \"\"\")\n\n Entrez.api_key = self._key\n Entrez.email = self._email\n\n\n try:\n conn = sqlite3.connect(self.sqlite_db)\n cur = conn.cursor()\n except sqlite3.Error as e:\n print(e)\n return\n\n #set up sqlite\n cur.execute('''CREATE TABLE IF NOT EXISTS Acc2Seq (rowid INT PRIMARY KEY, acc_id TEXT, seq_description TEXT, sequences TEXT)''')\n\n try:\n # select the field contain acc id\n cur.execute('''SELECT {} FROM {}'''.format(column_name, table_name))\n all_acc = cur.fetchall()\n len_all_acc = len(all_acc)\n except sqlite3.Error as e:\n print(\"Error. Reading {} error\\n\".format(table_name))\n print(e)\n return\n\n cur.execute('''SELECT acc_id FROM Acc2Seq''')\n existed = cur.fetchall()\n if len(existed) > 0:\n existed_id = [i[0] for i in existed]\n else:\n existed_id = []\n\n if len_all_acc > 0:\n\n all_acc_flat = [i[0] for i in all_acc]\n\n print('\\nTotal Accession Numbers: {}\\n'.format(len(all_acc_flat)))\n n = len(existed_id)\n for i in range(len(existed_id), len_all_acc):\n\n current_id = all_acc_flat[i]\n\n if current_id in existed_id:\n print(\"{} existed in the database\".format(current_id))\n continue\n else:\n\n #Total number of records from the input set to be retrieved, up to a maximum of 10,000. 
\n if current_id == 'NA':\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, 'NA', 'NA'))\n conn.commit()\n n += 1\n else:\n try:\n fetch = Entrez.efetch(db = self.ncbi_db, id = current_id, retmode = 'text', rettype = 'fasta')\n outs = fetch.read()\n except:\n print(\"Entrez Error\")\n\n\n fetch.close()\n fasta = outs.strip().split('\\n')\n\n if len(fasta) > 1:\n\n header = fasta[0]\n acc, descript = header.split()[0].replace('>', ''), ' '.join(header.split()[1:])\n seqs = ''.join(fasta[1:])\n\n print('Saving into database:')\n print('{} Acc_ID: {}\\n'.format(i+1, acc))\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, descript, seqs))\n conn.commit()\n n += 1\n time.sleep(3)\n\n else:\n print('Empty sequences')\n cur.execute('''INSERT INTO Acc2Seq VALUES (?,?,?,?)''', (n, current_id, \"NA\", \"NA\"))\n conn.commit()\n n += 1\n time.sleep(3)\n else:\n print(\"No Accession ID in the Database. Please Check!\")\n return\n\n cur.close()\n conn.close()\n print('\\nCompleted!\\n')\n return self.track.append('P3')", "def make_bitarray(self):\n macs_file = open(self.macs_file_name, 'r')\n alleles_bits = bitarray()\n position = []\n for line in macs_file:\n if re.match('SITE', line):\n columns = line.split('\\t')\n site_alleles = columns[4].strip()\n alleles_bits.extend(site_alleles)\n position.append(columns[2])\n macs_file.close()\n return [alleles_bits,position]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the default 40x40 cost matrix based on BLOSUM62 and assigns maximum cost to transport between opposite-signed differential selection contributions.
def get_cost_matrix():
    substitution_matrix = substitution_matrices.load("BLOSUM62")
    alphabet_list = _get_aa_ordered_list()
    Naa = len(alphabet_list)
    # chosen so that range of costs in the
    # matrix is within an order of magnitude
    nthroot = 7.0
    # maximum cost assigned by the cost matrix
    maxMij = np.exp(np.max(-substitution_matrix) / nthroot)
    cost_matrix = []
    # first 20 rows
    for aa in alphabet_list:
        row = [-x / nthroot for x in substitution_matrix[aa, :][:Naa]]
        cost_row = (np.exp(row)).tolist() + [maxMij for i in range(Naa)]
        cost_matrix.append(cost_row)
    # last 20 rows
    for aa in alphabet_list:
        row = [-x / nthroot for x in substitution_matrix[aa, :][:Naa]]
        cost_row = [maxMij for i in range(Naa)] + (np.exp(row)).tolist()
        cost_matrix.append(cost_row)
    return cost_matrix
[ "def compute_B_matrix(s_data, t_data, s_branches, t_branches):\n\n\tdef compute_branch_mapping_cost(s_branch, t_branch, s_data, t_data):\n\t\t\"\"\" Gets the cost of mapping branch s to branch t. (i.e. Q(s_b, t_b))\n\t\t\tActually no -> It \"knows\" that a branch must be mapped to a certain other branch, i.e. the function\n\t\t\tchecks that the mapping of the two branches has the smallest cost.\n\t\t\tThis must be called at every branch comparison.\n\t\t\"\"\"\n\n\n\t\tdef compute_node_mapping_cost(s_node, t_node, s_branch, t_branch, s_data, t_data):\n\t\t\t\"\"\" Computes the cost of mapping a node of branch_s to a node of branch_t. (i.e. K(s_n, t_n))\n\t\t\t\tThis must be called at every node comparison between two branches.\n\t\t\t\"\"\"\n\n\t\t\t# Set the weight coefficients\n\t\t\ta1 = 1.0\n\t\t\ta2 = 2.0\n\t\t\ta3 = 0.2\n\t\t\ta4 = 1.0\n\t\t\t\n\n\t\t\t# Compute each K\n\t\t\tKpos = get_k_pos(s_node, t_node, s_data, t_data)\n\t\t\tKperc = get_k_perc(s_node, t_node, s_branch, t_branch, s_data, t_data)\n\t\t\tKdeg = get_k_deg(s_node, t_node, s_data, t_data)\n\t\t\tKbet = get_k_bet(s_node, s_data)\n\n\n\t\t\tK = a1*Kpos + a2*Kperc + a3*Kdeg - a4*Kbet\n\t\t\t#print(str(Kpos)+\" \"+str(Kperc)+\" \"+str(Kdeg)+\" \"+str(Kbet))\n\t\t\treturn K\n\n\n\t\tdef find_optimal_nodes(costmatrix, s_branches, t_branches):\n\t\t\t\"\"\" This function gets the minimal value in each row, \n\t\t\t\ti.e. the minimal value for the nodes of the source skeleton to be mapped.\n\t\t\t\"\"\"\n\t\t\tcosts = []\n\t\t\tmapping = []\n\t\t\tinf = float('inf')\n\n\t\t\tprint(s_branches)\n\t\t\tprint(t_branches)\n\t\t\t# print(len(costmatrix))\n\t\t\t# print(len(costmatrix[0]))\n\t\t\tprint(costmatrix)\n\t\t\t# print(len(costmatrix)==len(s_branches))\n\t\t\t# print(len(costmatrix[0]) == len(t_branches))\n\n\t\t\tfor i in range(len(costmatrix)):\n\t\t\t\tmin_cost = inf\n\t\t\t\tsrc = ''\n\t\t\t\tdst = ''\n\t\t\t\tfor j in range(len(costmatrix[0])):\n\t\t\t\t\tif costmatrix[i][j] < min_cost:\n\t\t\t\t\t\tmin_cost = costmatrix[i][j]\n\t\t\t\t\t\tsrc = s_branches[i]\n\t\t\t\t\t\tdst = t_branches[j]\n\t\t\t\tmapping.append([src,dst])\n\t\t\t\tprint([src, dst])\n\t\t\t\tcosts.append(min_cost)\n\n\t\t\ttotal_cost = sum(costs)\n\t\t\t# print(mapping)\n\t\t\treturn mapping, total_cost\n\n\n\n\t\t\n\n\t\t# Initialize the N matrix (containing node mapping costs, within a pair of branches)\n\t\tinf = float('inf')\n\t\tN = [[inf for x in range(len(t_branch))] for x in range(len(s_branch))]\n\n\t\t# Save the cost of mapping a node in the source branch to a node in the target branch, in N\n\t\tfor i in range(len(s_branch)):\n\t\t\tfor j in range(len(t_branch)):\n\t\t\t\ts_n = s_branch[i]\n\t\t\t\tt_n = t_branch[j]\n\t\t\t\tnode_cost = compute_node_mapping_cost(s_n, t_n, s_branch, t_branch, s_data, t_data)\n\t\t\t\tN[i][j] = node_cost\n\n\n\t\t\n\t\tbranch_mapping, branch_cost = find_optimal_nodes(N, s_branch, t_branch)\n\n\t\t# Nnp = np.array(N)\n\n\t\t# min_costs = np.amin(Nnp, axis=1)\n\t\t# sum_costs = np.sum(min_costs)\n\n\t\treturn branch_mapping, branch_cost\n\n\n\n\n\t# Initialize the B matrix (containing branch mapping costs), and another containing node pairs mappings\n\tinf = float('inf')\n\tB_costs = [[inf for x in range(len(t_branches))] for x in range(len(s_branches))]\n\tB_mappings = [[[] for x in range(len(t_branches))] for x in range(len(s_branches))]\n\n\t# Save the cost of mapping a branch in the source to a branch in the target, in B\n\tfor i in range(len(s_branches)):\n\t\tfor j in 
range(len(t_branches)):\n\t\t\ts_b = s_branches[i]\n\t\t\tt_b = t_branches[j]\n\t\t\tbranch_mapping, branch_cost = compute_branch_mapping_cost(s_b, t_b, s_data, t_data)\n\t\t\tB_costs[i][j] = branch_cost\n\t\t\tB_mappings[i][j] = branch_mapping \n\n\tB_dict = {'costs':B_costs, 'mappings':B_mappings}\n\n\t# Bnp = np.array(B)\n\n\treturn B_dict", "def estimate_cost(self, board):\n x = np.array(board.config)\n x_encoded = np.eye(board.N)[x].ravel()\n y = self.get_model().predict(x_encoded.reshape(1, -1)).item()\n return y", "def create_cost_matrix(self):\n n = self.g1.size()\n m = self.g2.size()\n cost_matrix = [[0 for i in range(n + m)] for j in range(n + m)]\n\n nodes1 = self.g1.node_list()\n nodes2 = self.g2.node_list()\n\n for i in range(n):\n for j in range(m):\n cost_matrix[i][j] = self.substitute_cost(nodes1[i], nodes2[j])\n\n for i in range(m):\n for j in range(m):\n cost_matrix[i+n][j] = self.insert_cost(i, j, nodes2)\n\n for i in range(n):\n for j in range(n):\n cost_matrix[j][i+m] = self.delete_cost(i, j, nodes1)\n\n self.cost_matrix = cost_matrix\n return cost_matrix", "def estimate_cost(self, board):\n pass", "def _create_cost_matrix(self):\n n = len(self.g)\n m = len(self.h)\n cost_matrix = np.zeros((n + m, n + m))\n\n nodes_1 = _get_nodes(self.g)\n nodes_2 = _get_nodes(self.h)\n\n for i in range(n):\n for j in range(m):\n cost_matrix[i, j] = self.substitute_cost(nodes_1[i], nodes_2[j])\n\n for i in range(m):\n for j in range(m):\n cost_matrix[i + n, j] = self.insert_cost(i, j)\n\n for i in range(n):\n for j in range(n):\n cost_matrix[j, i + m] = self.delete_cost(i, j)\n\n return cost_matrix", "def get_backhaul_costs(region, backhaul, costs, core_lut):\n backhaul_tech = backhaul.split('_')[0]\n geotype = region['geotype'].split(' ')[0]\n\n nodes = 0\n for asset_type in ['core_node', 'regional_node']:\n for age in ['new', 'existing']:\n combined_key = '{}_{}'.format(region['GID_id'], age)\n nodes += core_lut[asset_type][combined_key]\n node_density_km2 = nodes / region['area_km2']\n\n if node_density_km2 > 0:\n ave_distance_to_a_node_m = (math.sqrt(1/node_density_km2) / 2) * 1000\n else:\n ave_distance_to_a_node_m = math.sqrt(region['area_km2']) * 1000\n\n if backhaul_tech == 'microwave':\n if ave_distance_to_a_node_m < 15000:\n tech = '{}_{}'.format(backhaul_tech, 'small')\n cost = costs[tech]\n elif 15000 < ave_distance_to_a_node_m < 30000:\n tech = '{}_{}'.format(backhaul_tech, 'medium')\n cost = costs[tech]\n else:\n tech = '{}_{}'.format(backhaul_tech, 'large')\n cost = costs[tech]\n\n elif backhaul_tech == 'fiber':\n tech = '{}_{}_m'.format(backhaul_tech, geotype)\n cost_per_meter = costs[tech]\n cost = cost_per_meter * ave_distance_to_a_node_m\n\n else:\n print('Did not recognise the backhaul technology {}'.format(backhaul_tech))\n cost = 0\n\n return cost", "def _compute_trans_cap_cost(cls, trans_table, trans_costs=None,\n avail_cap_frac=1, max_workers=None,\n connectable=True, line_limited=False,\n sc_capacity_col='capacity'):\n scc = sc_capacity_col\n if scc not in trans_table:\n raise SupplyCurveInputError('Supply curve table must have '\n 'supply curve point capacity column'\n '({}) to compute lcot'.format(scc))\n\n if trans_costs is not None:\n trans_costs = TF._parse_dictionary(trans_costs)\n else:\n trans_costs = {}\n\n if max_workers is None:\n max_workers = os.cpu_count()\n\n logger.info('Computing LCOT costs for all possible connections...')\n groups = trans_table.groupby('sc_gid')\n if max_workers > 1:\n loggers = [__name__, 'reV.handlers.transmission', 
'reV']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n for sc_gid, sc_table in groups:\n capacity = cls._get_capacity(sc_gid, sc_table,\n connectable=connectable,\n sc_capacity_col=scc)\n futures.append(exe.submit(TC.feature_costs, sc_table,\n capacity=capacity,\n avail_cap_frac=avail_cap_frac,\n line_limited=line_limited,\n **trans_costs))\n\n cost = [future.result() for future in futures]\n else:\n cost = []\n for sc_gid, sc_table in groups:\n capacity = cls._get_capacity(sc_gid, sc_table,\n connectable=connectable,\n sc_capacity_col=scc)\n cost.append(TC.feature_costs(sc_table,\n capacity=capacity,\n avail_cap_frac=avail_cap_frac,\n line_limited=line_limited,\n **trans_costs))\n\n cost = np.hstack(cost).astype('float32')\n logger.info('LCOT cost calculation is complete.')\n\n return cost", "def generate_cost_matrix(revenue, maintenance, repair):\r\n return np.array([[maintenance, repair],\r\n [maintenance, revenue]])", "def create_cost(self):\n change_print_color.change('GRAY')\n print(\"\\nCreating Costs...\")\n\n # Action Cost\n weight = 1e0 # 1e-4\n target = None\n act_cost = {\n 'type': CostAction,\n 'wu': np.ones(self.action_dim) * weight,\n 'target': target, # Target action value\n }\n\n # # FK Cost\n # fk_l1_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # 'wp': np.array([3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n #\n # fk_l2_cost = {\n # 'type': CostFK,\n # 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n # 'target_pose': target_distance_hand,\n # 'tgt_data_type': 'state', # 'state' or 'observation'\n # 'tgt_idx': bigman_env.get_state_info(name='distance_hand')['idx'],\n # 'op_point_name': hand_name,\n # 'op_point_offset': hand_offset,\n # 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],\n # 'joint_ids': bigman_params['joint_ids'][body_part_active],\n # 'robot_model': robot_model,\n # # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos\n # #'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'wp': np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos\n # 'evalnorm': evall1l2term,\n # #'evalnorm': evallogl2term,\n # 'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target\n # 'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target\n # 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm\n # 'wp_final_multiplier': 1, # 10\n # }\n\n\n # State costs\n target_distance_object = np.zeros(2)\n # input(self.env.get_state_info())\n # input(self.env.get_state_info(name='tgt0')['idx'])\n state_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n state_final_cost_distance = {\n 'type': CostState,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': 1.0, # Weight for l1 norm\n 'l2': 1.0, # Weight for l2 norm\n 'alpha': 1e-5, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt0': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': target_distance_object, # Target state - must be set.\n 'average': None,\n 'data_idx': self.env.get_state_info(name='tgt0')['idx']\n },\n },\n }\n\n cost_safe_distance = {\n 'type': CostSafeDistance,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'tgt1': {\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'safe_distance': np.array([0.15, 0.15]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([1.0, 1.0]),\n 'data_idx': self.env.get_state_info(name='tgt1')['idx']\n },\n },\n }\n\n state_diff_weights = self.task_params['state_diff_weights']\n l1_l2_weights = np.array(self.task_params['l1_l2'])\n inside_cost = self.task_params['inside_cost']\n\n cost_state_difference = {\n 'type': CostStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. 
RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': l1_l2_weights[0], # Weight for l1 norm\n 'l2': l1_l2_weights[1], # Weight for l2 norm\n 'alpha': 1e-10, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'],\n 'idx_to_use': [0, 1, 2], # All: X, Y, theta\n 'wp': np.array(state_diff_weights), # State weights - must be set.\n 'average': None,\n 'target_state': 'tgt0', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt0')['idx'],\n },\n },\n }\n\n cost_final_state_difference = {\n 'type': CostStateDifference,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY\n 'evalnorm': evall1l2term, # TODO: ALWAYS USE evall1l2term\n 'l1': l1_l2_weights[0], # Weight for l1 norm\n 'l2': l1_l2_weights[1], # Weight for l2 norm\n 'alpha': 1e-10, # Constant added in square root in l1 norm\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'],\n 'idx_to_use': [0, 1, 2], # All: X, Y, theta\n 'wp': np.array(state_diff_weights), # State weights - must be set.\n 'average': None,\n 'target_state': 'tgt0', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt0')['idx'],\n },\n },\n }\n\n safe_radius = 0.15\n cost_safe_state_difference = {\n 'type': CostSafeStateDifference,\n 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'][:2],\n 'idx_to_use': [0, 1], # Only X and Y\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': 'tgt1', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt1')['idx'][:2],\n 'safe_distance': np.sqrt([safe_radius**2/2, safe_radius**2/2]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([inside_cost, inside_cost]),\n },\n },\n }\n\n cost_final_safe_state_difference = {\n 'type': CostSafeStateDifference,\n 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time.\n 'wp_final_multiplier': 1.0, # Weight multiplier on final time step.\n 'data_types': {\n 'ee': {\n 'data_idx': self.env.get_state_info(name='ee')['idx'][:2],\n 'idx_to_use': [0, 1], # Only X and Y\n 'wp': np.array([1.0, 1.0]), # State weights - must be set.\n 'target_state': 'tgt1', # Target state - must be set.\n 'tgt_idx': self.env.get_state_info(name='tgt1')['idx'][:2],\n 'safe_distance': np.sqrt([safe_radius**2/2, safe_radius**2/2]),\n 'outside_cost': np.array([0.0, 0.0]),\n 'inside_cost': np.array([inside_cost, inside_cost]),\n },\n },\n }\n\n\n # Sum costs\n # costs_and_weights = [(act_cost, 1.0e-1),\n des_weights = self.task_params['cost_weights']\n print('Costs weights:', des_weights)\n costs_and_weights = [(act_cost, des_weights[0]),\n # # (fk_cost, 1.0e-0),\n # (fk_l1_cost, 1.5e-1),\n # (fk_l2_cost, 1.0e-0),\n # # (fk_final_cost, 1.0e-0),\n # (fk_l1_final_cost, 1.5e-1),\n # (fk_l2_final_cost, 1.0e-0),\n (cost_state_difference, des_weights[1]),\n (cost_final_state_difference, des_weights[2]),\n (cost_safe_state_difference, des_weights[3]),\n (cost_final_safe_state_difference, des_weights[4]),\n # WORKING:\n # (cost_safe_distance, 1.0e+1),\n # (state_cost_distance, 5.0e-0),\n 
# (state_final_cost_distance, 1.0e+3),\n ]\n\n cost_sum = {\n 'type': CostSum,\n 'costs': [cw[0] for cw in costs_and_weights],\n 'weights': [cw[1] for cw in costs_and_weights],\n }\n\n return cost_sum", "def costFunction(self, i, j):\n\t\tif i==\"-\" or j==\"-\":\n\t\t\treturn -5;\n\t\treturn self.blosum45[i][j];", "def compute_branch_mapping_cost(s_branch, t_branch, s_data, t_data):\n\n\n\t\tdef compute_node_mapping_cost(s_node, t_node, s_branch, t_branch, s_data, t_data):\n\t\t\t\"\"\" Computes the cost of mapping a node of branch_s to a node of branch_t. (i.e. K(s_n, t_n))\n\t\t\t\tThis must be called at every node comparison between two branches.\n\t\t\t\"\"\"\n\n\t\t\t# Set the weight coefficients\n\t\t\ta1 = 1.0\n\t\t\ta2 = 2.0\n\t\t\ta3 = 0.2\n\t\t\ta4 = 1.0\n\t\t\t\n\n\t\t\t# Compute each K\n\t\t\tKpos = get_k_pos(s_node, t_node, s_data, t_data)\n\t\t\tKperc = get_k_perc(s_node, t_node, s_branch, t_branch, s_data, t_data)\n\t\t\tKdeg = get_k_deg(s_node, t_node, s_data, t_data)\n\t\t\tKbet = get_k_bet(s_node, s_data)\n\n\n\t\t\tK = a1*Kpos + a2*Kperc + a3*Kdeg - a4*Kbet\n\t\t\t#print(str(Kpos)+\" \"+str(Kperc)+\" \"+str(Kdeg)+\" \"+str(Kbet))\n\t\t\treturn K\n\n\n\t\tdef find_optimal_nodes(costmatrix, s_branches, t_branches):\n\t\t\t\"\"\" This function gets the minimal value in each row, \n\t\t\t\ti.e. the minimal value for the nodes of the source skeleton to be mapped.\n\t\t\t\"\"\"\n\t\t\tcosts = []\n\t\t\tmapping = []\n\t\t\tinf = float('inf')\n\n\t\t\tprint(s_branches)\n\t\t\tprint(t_branches)\n\t\t\t# print(len(costmatrix))\n\t\t\t# print(len(costmatrix[0]))\n\t\t\tprint(costmatrix)\n\t\t\t# print(len(costmatrix)==len(s_branches))\n\t\t\t# print(len(costmatrix[0]) == len(t_branches))\n\n\t\t\tfor i in range(len(costmatrix)):\n\t\t\t\tmin_cost = inf\n\t\t\t\tsrc = ''\n\t\t\t\tdst = ''\n\t\t\t\tfor j in range(len(costmatrix[0])):\n\t\t\t\t\tif costmatrix[i][j] < min_cost:\n\t\t\t\t\t\tmin_cost = costmatrix[i][j]\n\t\t\t\t\t\tsrc = s_branches[i]\n\t\t\t\t\t\tdst = t_branches[j]\n\t\t\t\tmapping.append([src,dst])\n\t\t\t\tprint([src, dst])\n\t\t\t\tcosts.append(min_cost)\n\n\t\t\ttotal_cost = sum(costs)\n\t\t\t# print(mapping)\n\t\t\treturn mapping, total_cost\n\n\n\n\t\t\n\n\t\t# Initialize the N matrix (containing node mapping costs, within a pair of branches)\n\t\tinf = float('inf')\n\t\tN = [[inf for x in range(len(t_branch))] for x in range(len(s_branch))]\n\n\t\t# Save the cost of mapping a node in the source branch to a node in the target branch, in N\n\t\tfor i in range(len(s_branch)):\n\t\t\tfor j in range(len(t_branch)):\n\t\t\t\ts_n = s_branch[i]\n\t\t\t\tt_n = t_branch[j]\n\t\t\t\tnode_cost = compute_node_mapping_cost(s_n, t_n, s_branch, t_branch, s_data, t_data)\n\t\t\t\tN[i][j] = node_cost\n\n\n\t\t\n\t\tbranch_mapping, branch_cost = find_optimal_nodes(N, s_branch, t_branch)\n\n\t\t# Nnp = np.array(N)\n\n\t\t# min_costs = np.amin(Nnp, axis=1)\n\t\t# sum_costs = np.sum(min_costs)\n\n\t\treturn branch_mapping, branch_cost", "def calculate_pmax_cost(daily_microgrid_prof: np.ndarray, \r\n contracted_p_tariffs: dict) -> float:\r\n \r\n # get all thresholds values and sort them\r\n power_thresholds = list(contracted_p_tariffs.keys())\r\n power_thresholds.sort()\r\n\r\n # if microgrid max. 
load (power) bigger than pmax values in contract, extrapolate \r\n if max(abs(daily_microgrid_prof)) > power_thresholds[-1]:\r\n return max(abs(daily_microgrid_prof)), \\\r\n contracted_p_tariffs[power_thresholds[-1]] + \\\r\n (max(abs(daily_microgrid_prof)) - power_thresholds[-1]) \\\r\n * (contracted_p_tariffs[power_thresholds[-1]] \\\r\n - contracted_p_tariffs[power_thresholds[-2]]) \\\r\n / (power_thresholds[-1] - power_thresholds[-2])\r\n # else find the pmax contract threshold corresponding to the microgrid \r\n # max. load (power)\r\n else:\r\n idx_p_threshold = np.where(power_thresholds \\\r\n >= max(abs(daily_microgrid_prof)))[0][0]\r\n return power_thresholds[idx_p_threshold], \\\r\n contracted_p_tariffs[power_thresholds[idx_p_threshold]]", "def _map_trans_capacity(trans_sc_table, sc_capacity_col='capacity'):\n\n nx = trans_sc_table[sc_capacity_col] / trans_sc_table['max_cap']\n nx = np.ceil(nx).astype(int)\n trans_sc_table['n_parallel_trans'] = nx\n\n if (nx > 1).any():\n mask = nx > 1\n tie_line_cost = (trans_sc_table.loc[mask, 'tie_line_cost']\n * nx[mask])\n\n xformer_cost = (trans_sc_table.loc[mask, 'xformer_cost_per_mw']\n * trans_sc_table.loc[mask, 'max_cap'] * nx[mask])\n\n conn_cost = (xformer_cost\n + trans_sc_table.loc[mask, 'sub_upgrade_cost']\n + trans_sc_table.loc[mask, 'new_sub_cost'])\n\n trans_cap_cost = tie_line_cost + conn_cost\n\n trans_sc_table.loc[mask, 'tie_line_cost'] = tie_line_cost\n trans_sc_table.loc[mask, 'xformer_cost'] = xformer_cost\n trans_sc_table.loc[mask, 'connection_cost'] = conn_cost\n trans_sc_table.loc[mask, 'trans_cap_cost'] = trans_cap_cost\n\n msg = (\"{} SC points have a capacity that exceeds the maximum \"\n \"transmission feature capacity and will be connected with \"\n \"multiple parallel transmission features.\"\n .format((nx > 1).sum()))\n logger.info(msg)\n\n return trans_sc_table", "def linear_sum_assignment(cost_matrix: torch.Tensor, max_size: int = 100):\n cost_matrix = cost_matrix.clone().detach()\n\n if len(cost_matrix.shape) != 2:\n raise ValueError(f\"2-d tensor is expected but got a {cost_matrix.shape} tensor\")\n if max(cost_matrix.shape) > max_size:\n raise ValueError(\n f\"Cost matrix size {cost_matrix.shape} is too large. 
The maximum supported size is {max_size}x{max_size}.\"\n )\n\n # The algorithm expects more columns than rows in the cost matrix.\n if cost_matrix.shape[1] < cost_matrix.shape[0]:\n cost_matrix = cost_matrix.T\n transposed = True\n else:\n transposed = False\n\n lap_solver = LinearSumAssignmentSolver(cost_matrix)\n f_int: int = 0 if 0 in cost_matrix.shape else 1\n\n # while step is not Done (step 0):\n # NOTE: torch.jit.scipt does not support getattr with string argument.\n # Do not use getattr(lap_solver, f\"_step{f_int}\")()\n while f_int != 0:\n if f_int == 1:\n f_int = lap_solver._step1()\n elif f_int == 2:\n f_int = lap_solver._step2()\n elif f_int == 3:\n f_int = lap_solver._step3()\n elif f_int == 4:\n f_int = lap_solver._step4()\n elif f_int == 5:\n f_int = lap_solver._step5()\n elif f_int == 6:\n f_int = lap_solver._step6()\n\n if transposed:\n marked = lap_solver.marked.T\n else:\n marked = lap_solver.marked\n row_index, col_index = torch.where(marked == 1)\n return row_index, col_index", "def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost", "def cost_subtree(self):\n r = {}\n for n in self.store.keys():\n _, t_avail = self.simulate_current(start=n)\n\n # Select the maximum t_avail from all calculated nodes\n _, r[n] = sorted(t_avail.items(), key=lambda x: x[1], reverse=True)[0]\n\n return r", "def _construct_other_reg_cost(self):\n act_reg_cost = (self.IN.act_reg_cost + self.GN.act_reg_cost)\n gp_cost = sum([T.sum(par**2.0) for par in self.gn_params])\n ip_cost = sum([T.sum(par**2.0) for par in self.in_params])\n param_reg_cost = self.lam_l2w[0] * (gp_cost + ip_cost)\n other_reg_cost = (act_reg_cost / self.obs_count) + param_reg_cost\n return other_reg_cost", "def init_cost_table(m, n):\n cost_table = [[0] * (n+1) for _ in range(m+1)]\n return cost_table", "def gettravelcostmap(nrows, ncols, header, CENTERLIST=CENTERLIST, COSTMAX=COSTMAX, \n TRAVELCOSTPATH=TRAVELCOSTPATH, TRAVELCOSTMAP=TRAVELCOSTMAP):\n with open(CENTERLIST, 'r') as p:\n centerlist = p.readlines()\n \n trcostdf = pd.DataFrame(index=xrange(nrows), columns=xrange(ncols)) #initialize costmap with nan\n trcostdf = trcostdf.fillna(COSTMAX) #initialize costmap with 999\n\n for i in xrange(99):\n (disW, disN, weight) = centerlist[i].strip('\\n').split(',')\n costmapfile = outfilename(disW, disN, TRAVELCOSTPATH, TRAVELCOSTMAP, \"NW\", 100)\n try:\n newtrcostdf = pd.read_csv(costmapfile, skiprows=6, header=None, sep=r\"\\s+\" ) #skip the 6 header lines\n print disW, disN, weight\n except IOError:\n print \"file not found: \", costmapfile\n continue\n trcostdf = np.minimum(trcostdf, newtrcostdf)\n \n # header = extractheader(HEADER)\n with open(TRCOSTMAP, 'w') as w:\n w.writelines(header)\n trcostdf.round() # round to integer\n trcostdf.to_csv(path_or_buf=TRCOSTMAP, sep=' ', index=False, header=False, mode = 'a') # append\n return trcostdf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the list of weights for computing the weighted sum of similarity scores in region [loc_start, loc_end]
def _get_weights(ds, metric, sample_factor, sfact_val1, sfact_val2, loc_start, loc_end):
    # Code assumes peptide annotation for location is called 'Loc'
    loc_sums1 = []
    loc_sums2 = []
    for loc in range(loc_start, loc_end + 1):
        ds1 = ds.loc[
            dict(
                peptide_id=id_query(ds, f"Loc == '{loc}'", "peptide"),
                sample_id=id_query(ds, f"{sample_factor} == '{sfact_val1}'")
            )
        ]
        diff_sel1 = ds1[metric].to_pandas().to_numpy().flatten()
        loc_sums1.append(0)
        for val in diff_sel1:
            loc_sums1[-1] = loc_sums1[-1] + abs(val)
        ds2 = ds.loc[
            dict(
                peptide_id=id_query(ds, f"Loc == '{loc}'", "peptide"),
                sample_id=id_query(ds, f"{sample_factor} == '{sfact_val2}'")
            )
        ]
        diff_sel2 = ds2[metric].to_pandas().to_numpy().flatten()
        loc_sums2.append(0)
        for val in diff_sel2:
            loc_sums2[-1] = loc_sums2[-1] + abs(val)
    loc_sums1 = loc_sums1 / np.sum(loc_sums1)
    loc_sums2 = loc_sums2 / np.sum(loc_sums2)
    weights = {}
    total = 0
    for i, loc in zip(range(loc_end - loc_start + 1), range(loc_start, loc_end + 1)):
        val = min(loc_sums1[i], loc_sums2[i])
        total = total + val
        weights[loc] = val
    weights = {k: v / total for k, v in weights.items()}
    return weights
[ "def compute_weights(self) -> list:\n weights = []\n for num in self.population:\n # Our purpose: find x with fitness value near 0 as much as possible\n # So if abs(x) is large, negative of it (weight) will be small\n weights.append(0 - abs(self.equation(num+self.offset))) # abs to find x near 0\n return weights", "def grade_region(lines_weight):\n grade = 0\n for weight in lines_weight:\n grade += weight\n return grade", "def region_sim_score(\n ds, metric, cost_matrix, sample_factor, sfact_val1, sfact_val2, loc_start, loc_end\n):\n\n weights = _get_weights(\n ds, metric, sample_factor, sfact_val1, sfact_val2, loc_start, loc_end\n )\n region_sim = 0\n for loc in range(loc_start, loc_end + 1):\n sim = weights[loc] * loc_sim_score(\n ds, metric, cost_matrix, sample_factor, sfact_val1, sfact_val2, loc\n )\n region_sim = region_sim + sim\n\n return region_sim", "def _weights_for_terms(self, terms):\n raise NotImplementedError", "def weighted_neighbour_calculation(distances, neighbours):\n # Initialise the lists used to store neighbour weights, and the fractions\n # to be used in calculating the test feature r value\n neighbour_weights = list()\n neighbour_distance_fractions = list()\n\n # For each of the neighbours\n for i in range(0, len(neighbours)):\n # Get the neighbour\n neighbour = neighbours[i]\n # Calculate the weight of the neighbour and append to weight list\n neighbour_weights.append((1 / distances[i]) * neighbour[12])\n # Append the neighbour distance fraction to the fraction list\n neighbour_distance_fractions.append(1 / distances[i])\n\n # Calculate the predicted r value of the test query by summing up all\n # values in both lists and dividing the weights by the distance fractions\n prediction = np.sum(np.array(neighbour_weights)) / np.sum(\n np.array(neighbour_distance_fractions))\n\n return prediction", "def _getWeights(self, a, b):\n assert a <= b, 'Interval boundaries are corrupt, got %f and %f' % (a, b)\n M = self.num_nodes\n weights = np.zeros(M)\n\n # Define temporary integration method using built-in Gauss-Legendre\n # -> will need this to compute the integral from a to b over the Lagrangian polynomials\n [nodes_m, weights_m] = self._GaussLegendre(np.ceil(M / 2), a, b)\n\n # for each node, build Lagrangian polynomial in Newton base, evaluate at temp. 
integration nodes and integrate\n for j in np.arange(M):\n coeff = np.zeros(M)\n coeff[j] = 1.0\n poly = self._poly_newton(coeff)\n eval_pj = self._evaluate_horner(nodes_m, poly)\n weights[j] = self.evaluate(weights_m, eval_pj)\n\n return weights", "def compute_sample_weight(class_weight, y, *, indices=...):\n ...", "def weight_calculation(X_train, Y_train, lambda_end, lambda_start = 0):\n \n # Specify lambda start and lambda end\n lambda_values = np.arange(lambda_start,lambda_end + 1)\n \n # List to store weights with varying lambdas\n weights_varying_lambda =[]\n\n for lambda_value in lambda_values:\n # Transposing X_train, give as a numpy array\n X_train_transpose = np.transpose(X_train) \n \n #Multiply X'*X\n XTX_train = np.dot(X_train_transpose, X_train)\n \n # Identity Matrix\n I = np.identity(len(XTX_train))\n\n # Multiply lambda by the identity Matrix\n Lambda_times_I = np.dot(lambda_value, I)\n \n # Add X'X Matrix and lambda*I Matrix\n XTX_plus_I = XTX_train + Lambda_times_I\n \n # Take the multiplicative inverse of the matrix (X'X + lambda*I)\n XTX_plus_I_inv = np.linalg.inv(XTX_plus_I)\n \n # Mutliply X'*Y\n XTY_train = np.dot(X_train_transpose, Y_train)\n \n # Solve for w = (X'X + lambda(I))^-1 * X'Y\n weights = np.dot(XTX_plus_I_inv, XTY_train)\n \n # Flatten the array\n weights_varying_lambda.append(weights.flatten())\n \n # Weights for each lambda are stored in the array column wise\n weights_train_dataset = np.transpose(np.array(weights_varying_lambda))\n \n return(weights_train_dataset)", "def get_weight_range(self):\n return self._mins[1], self._maxs[1]", "def calculate_averg_weights(locs, words):\r\n d = defaultdict(list)\r\n for pair in locs:\r\n for loc in pair[1]:\r\n d[loc].append(pair[0])\r\n word_prob = [0.5] * len(words) # neutral words with probability 0.5\r\n for k, v in d.items():\r\n if k < len(words):\r\n word_prob[int(k)] = sum(v) / len(v)\r\n return word_prob", "def edge_list ( # type: ignore\n self,\n ) -> typing.List[typing.Tuple[typing.List[Span], typing.List[Span], typing.Dict[str, float]]]:\n weighted_edges = []\n\n for i, source_topic in enumerate(self.node_list): # type: ignore\n for target_topic in self.node_list[i + 1 :]: # type: ignore\n weight = 0.0\n\n for source_member in source_topic:\n for target_member in target_topic:\n distance = abs(source_member.start - target_member.start)\n\n if distance:\n weight += 1.0 / distance\n\n weight_dict = {\"weight\": weight * self.edge_weight}\n weighted_edges.append((source_topic, target_topic, weight_dict))\n weighted_edges.append((target_topic, source_topic, weight_dict))\n\n return weighted_edges", "def calculateWeights(stations, df):\n\n #Variables\n\n #List all sensors present in full dataset\n sensors = df[\"Sensor\"].unique()\n\n weights = {}\n\n #################################################################################\n\n #Loop over all the sensors\n for sensor in sensors:\n\n #Make an array with the latitude and longitude of the sensor\n x = np.array(df[df[\"Sensor\"] == sensor].reset_index()[\"SensorLatitude\"][0],\n df[df[\"Sensor\"] == sensor].reset_index()[\"SensorLongitude\"][0]).reshape(1, -1)\n\n station_weights = {}\n #Loop over all stations\n for station in stations:\n\n #Make an array with the latitude and longitude of the station\n y = np.array(df[station + \" Lat\"][0],\n df[station + \" Lon\"][0]).reshape(1, -1)\n\n #Add station weight\n station_weights[station + \" weight\"] = rbf_kernel(x, y)\n\n weights[sensor] = station_weights\n\n return weights", "def 
get_neighbor_weights(gps_loc, N, k):\n\n weights = np.zeros((N, N))\n\n for i in xrange(N):\n # Finding the k-nearest neighbors.\n neighbors = np.vstack(sorted([(j, np.linalg.norm(gps_loc[i] - gps_loc[j])) for j in xrange(N)],\n key=lambda x: x[1])[1:k+1])[:, 0].astype('int')\n weights[i, neighbors] = 1\n\n return weights", "def compute_weights(self):\n weights = [sin(pi/( 2+(0.04*day)**4 ))**8 for day in range(-30,1)]\n # Last days are more important\n weights[30] = weights[30]+3\n weights[29] = weights[29]+2\n weights[28] = weights[28]+1\n self.weights = weights", "def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ", "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "def _compute_weighted_values(\n self,\n point_orogenh: ndarray,\n x_source: ndarray,\n y_source: ndarray,\n distance: ndarray,\n wind_speed: ndarray,\n ) -> Tuple[ndarray, ndarray]:\n source_values = np.fromiter(\n (\n point_orogenh[y, x]\n for (x, y) in zip(x_source.flatten(), y_source.flatten())\n ),\n np.float32,\n count=x_source.size,\n ).reshape(x_source.shape)\n\n # set standard deviation for Gaussian weighting function in grid\n # squares\n grid_spacing_m = 1000.0 * self.grid_spacing_km\n stddev = wind_speed * self.cloud_lifetime_s / grid_spacing_m\n variance = np.square(stddev)\n\n # calculate weighted values at source points\n value_weight = np.where(\n (np.isfinite(distance)) & (variance > 0),\n np.exp(np.divide(-0.5 * np.square(distance), variance)),\n 0,\n )\n sum_of_weights = np.sum(value_weight, axis=0)\n weighted_values = np.multiply(source_values, value_weight)\n\n return np.sum(weighted_values, axis=0), sum_of_weights", "def optimal_weights(n_points,er,cov):\r\n target_rs=np.linspace(er.min(),er.max(),n_points)\r\n weights=[minimize_vol(target_return,er,cov) for target_return in target_rs]\r\n return weights", "def get_mean_positive_weight(self, incoming_projection):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the similarity score given two distributions and the cost matrix.
def compute_sim_score(a, b, cost_matrix):
    if np.sum(a) == 0 or np.sum(b) == 0:
        return 0
    cost = ot.emd2(a, b, cost_matrix)
    return 1.0 / cost
[ "def similarity(topic_vector_a: TopicVector, topic_vector_b: TopicVector) -> float:\n return matutils.cossim(topic_vector_a, topic_vector_b)", "def _compute_similarities(preds: List[Tuple[torch.FloatTensor, torch.FloatTensor]]) -> torch.FloatTensor:\n return -AlignmentMetric._compute_cost(preds)", "def _Similarity(self, motifid1, motifid2, metric='Bayesian'):\n if len(self.motifs[motifid1]['pssm']) == 1 and len(self.motifs[motifid2]['pssm']) == 1:\n m1 = self._ConvertToOldMotif(motifid1)\n m2 = self._ConvertToOldMotif(motifid2)\n similarity_score, offset, antisense, flag_merge = bayesian_motif_comp.BLiC_score(m1.pssm, m2.pssm)\n antisense = bool(antisense)\n return similarity_score, offset, antisense, flag_merge\n else:\n Info('ERROR: It has no matrix or more than 1 matrix: %s, %s'%(motifid1, motifid2))", "def similarity_distance(data, person1, person2):\n # list of shared items\n shared_items = {}\n for item in data[person1]:\n if item in data[person2]:\n shared_items[item] = 1\n\n # no ratings in common\n if len(shared_items) == 0:\n return 0\n\n # calculating distance\n sum_of_squares = 0\n for item in shared_items:\n sum_of_squares += pow(data[person1][item] - data[person2][item], 2)\n\n ret_val = 1 / (1 + sqrt(sum_of_squares))\n return ret_val", "def similarity(self, word1, word2):\r\n aa = self.dot(self.distributional_sims.get(word1, {}), self.distributional_sims.get(word1, {}))\r\n ab = self.dot(self.distributional_sims.get(word1, {}), self.distributional_sims.get(word2, {}))\r\n bb = self.dot(self.distributional_sims.get(word2, {}), self.distributional_sims.get(word2, {}))\r\n if (aa == 0 or ab == 0 or bb == 0): # if word had no features (one word sentence) then similarity must be 0\r\n return 0\r\n cos_similarity = ab / (math.sqrt(aa * bb)) # cosine similarity\r\n return cos_similarity", "def euclidean_similarity(dic1, dic2, term='eval'):\n return 1/(1+euclidean_dist(dic1, dic2, term))", "def calc_similarity(skills_1, skills_2):\n\n return sum(s1 * s2 for s1, s2 in zip(skills_1, skills_2))", "def calculateSimilarity(self, v1, v2):\n pass", "def compare_distributions(matrix1, matrix2, NDs=[-1, -2]):\r\n \r\n sys.stdout.write('Comparing distributions...')\r\n \r\n # Initialize the results dictionary: \r\n results = {'matrix1': 0, 'both':0, 'matrix2': 0}\r\n \r\n # Check that all matrices have the same dimensions and type\r\n type1 = type(matrix1)\r\n dims1 = (N.size(matrix1, 0), N.size(matrix1, 1))\r\n dims2 = (N.size(matrix2, 0), N.size(matrix2, 1))\r\n try:\r\n assert type(matrix2) is type1, \"Matrix types do not match\"\r\n assert dims1 == dims2, \"mismatch of dimensions for matrix\"\r\n except AssertionError, e:\r\n print '%s, exiting...' 
% e\r\n sys.exit(0)\r\n\r\n # Loop through matrices\r\n for i in xrange(dims1[0]):\r\n for j in xrange(dims1[1]):\r\n val1 = matrix1[i, j]\r\n val2 = matrix2[i, j]\r\n if val1 not in NDs and val2 in NDs:\r\n results['matrix1'] += 1\r\n elif val1 not in NDs and val2 not in NDs:\r\n results['both'] += 1\r\n elif val1 in NDs and val2 not in NDs:\r\n results['matrix2'] += 1\r\n \r\n print 'done.'\r\n return results", "def weighted_squared_distance(a1: np.ndarray, a2: np.ndarray, weights: np.ndarray) -> float:\n return np.sum((a1 - a2)**2 * weights)", "def similarity(self, other):\n\n user_ratings = {}\n paired_ratings = []\n\n for rating in self.ratings:\n user_ratings[rating.movie_id] = rating\n\n for r in other.ratings:\n u_r = user_ratings.get(r.movie_id)\n\n if u_r is not None:\n paired_ratings.append((u_r.score, r.score))\n\n if paired_ratings:\n return pearson(paired_ratings)\n else:\n return 0.0", "def compute_similarity_score(prot1_scop, prot2_scop):\n ##########################\n ### START CODING HERE ####\n ##########################\n # You need to decide whether you need this function for SCOP database.\n pass\n\n\n ########################\n ### END CODING HERE ####\n ########################", "def objective_distance(text1, text2):\n # whitespace tokenization\n text1 = tokenize_ws(text1)\n text2 = tokenize_ws(text2)\n\n # Add the '#' to the beginning of the two sentences\n size_x = len(text1) + 1\n size_y = len(text2) + 1\n\n matrix = np.zeros((size_x, size_y))\n # add first row and column\n for x in range(size_x):\n matrix [x, 0] = x\n for y in range(size_y):\n matrix [0, y] = y\n\n # iteration all cells\n for x in range(1, size_x):\n for y in range(1, size_y):\n if text1[x-1] == text2[y-1]: # if last character is the same\n matrix [x,y] = min(\n matrix[x-1, y] + 1,\n matrix[x, y-1] + 1,\n matrix[x-1, y-1]\n )\n else: # if last character is not the same\n matrix [x,y] = min(\n matrix[x-1, y] + 1,\n matrix[x, y-1] + 1,\n matrix[x-1, y-1] + 1\n )\n\n # return result divided by sentence length\n return matrix[size_x - 1, size_y - 1] / len(text1)", "def cosine_similarity(d1, d2):\n return dot_product(d1, d2) / (norm(d1) * norm(d2))", "def similarity_scores(self,other): \n word_score = compare_dictionaries(other.words,self.words)\n word_lengths_score = compare_dictionaries(other.word_lengths,self.word_lengths)\n stems_score = compare_dictionaries(other.stems, self.stems)\n sentence_lengths_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n punctuation_score = compare_dictionaries(other.punctuation, self.punctuation)\n return ([word_score] + [word_lengths_score] + [stems_score] + [sentence_lengths_score] + [punctuation_score])", "def wordnet_sim(set_a, set_b):\n # permutate all possible sim calcs\n possible_pairs = itertools.product(set_a, set_b)\n scores = []\n for pair in possible_pairs:\n score = pair[0].path_similarity(pair[1])\n if score is not None:\n scores.append(score)\n if scores:\n return max(scores)\n else:\n return 0.1", "def getSimilarityMetric(word1, word2):\n #empty lists to hold characters\n ch_word1 =[]\n ch_word2 = []\n #maps characters from each word to lists\n for ch in word1:\n ch_word1.append(ch)\n for ch2 in word2:\n ch_word2.append(ch2)\n #records lengths for each word\n count1 = len(ch_word1)\n count2 = len(ch_word2)\n #sets iteration value to 0\n iteration = 0\n score_left = 0\n #sets while loop to iterate until all the letters have been compared\n while iteration < count1 and iteration < count2:\n #as long as the letters match a 
score value will be increased by one\n if ch_word1[iteration] == ch_word2[iteration]:\n score_left = score_left + 1\n iteration = iteration + 1\n else:\n iteration = iteration + 1\n #reverses the lists so can be read from right to left\n rt_ch_word1 = ch_word1[-1::-1]\n rt_ch_word2 = ch_word2[-1::-1]\n iteration = 0\n score_right = 0\n #same as above except records score for right to left\n while iteration < count1 and iteration < count2:\n if rt_ch_word1[iteration] == rt_ch_word2[iteration]:\n score_right = score_right + 1\n iteration = iteration + 1\n else:\n iteration = iteration + 1\n #calculates the similarity\n similarity = ((score_left + score_right) / 2.0)\n return similarity", "def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_lengths_scores = compare_dictionaries(other.word_lengths, self.word_lengths)\n stems_scores = compare_dictionaries(other.stems, self.stems)\n sentence_lengths_socre = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n word_pair_score = compare_dictionaries(other.word_pair, self.word_pair)\n return [word_score, word_lengths_scores, stems_scores, sentence_lengths_socre, word_pair_score]", "def calc_distance_metric(weights):\n\n def metric(v1, v2):\n return spatial.distance.cosine(v1 * weights, v2 * weights)\n\n return metric" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the similarity score for comparison in the region [loc_start, loc_end].
def region_sim_score(ds, metric, cost_matrix, sample_factor, sfact_val1, sfact_val2, loc_start, loc_end):
    weights = _get_weights(ds, metric, sample_factor, sfact_val1, sfact_val2, loc_start, loc_end)
    region_sim = 0
    for loc in range(loc_start, loc_end + 1):
        sim = weights[loc] * loc_sim_score(ds, metric, cost_matrix, sample_factor, sfact_val1, sfact_val2, loc)
        region_sim = region_sim + sim
    return region_sim
[ "def _compute_region_score(self,\n saliency_map: torch.Tensor,\n region: Tuple[int, int, int, int],\n global_mean: float = 0) -> float:\n raw_score = self._select_region(saliency_map, region).sum().item()\n return raw_score - global_mean * self._region_size(region)", "def __compute_similarity(self):\n similarity_thr = config.getfloat('Params', 'similarity_thr')\n\n labeled_map = label(input=self.diff_map, neighbors=8)\n for region in regionprops(labeled_map):\n ty, tx, by, bx = region.bbox\n if region.area < config.getint('Params', 'min_area'): # min size of single blob\n self.diff_map[ty:by, tx:bx] = 0\n continue\n # Extract relevant rect from origin images:\n ref_patch = self.new_ref_img[ty:by, tx:bx]\n inspected_patch = self.shifted_inspected_img[ty:by, tx:bx]\n # comparison:\n ref_hist = cv2.calcHist(images=[ref_patch], channels=[0], mask=None, histSize=[256], ranges=[0, 256])\n inspected_hist = cv2.calcHist(images=[inspected_patch], channels=[0], mask=None, histSize=[256],\n ranges=[0, 256])\n norm_ref_hist = cv2.normalize(ref_hist, ref_hist).flatten()\n norm_inspected_hist = cv2.normalize(inspected_hist, inspected_hist).flatten()\n\n similarity_score = cv2.compareHist(norm_inspected_hist[10:], norm_ref_hist[10:],\n cv2.HISTCMP_CORREL) # filter black BG pixels\n if similarity_score > similarity_thr:\n self.diff_map[ty:by, tx:bx] = 0", "def dist(loc1, loc2):\n return math.sqrt((loc1[0]-loc2[0])**2 + (loc1[1]-loc2[1])**2)", "def distance_regions(r1, r2, fn=min, start=1e9):\n dists = [lib.eu_dist(*pair) for pair in\n itt.product(r1.borders(), r2.borders())]\n return reduce(fn, dists, start)", "def distance_regions_centra(r1,r2):\n return lib.eu_dist(r1.center(), r2.center())", "def overlap_score(q1, q2):\n score = 0\n return score", "def evaluate(self, player_loc=None, opp_loc=None):\n\n distance = math.sqrt(sum([(a - b) ** 2 for a, b in zip(player_loc, opp_loc)]))\n\n return distance", "def distance_regionprops(self):\n\n dist_x = np.array([c.centroid[1] for c in self.partner1]) - \\\n np.array([c.centroid[1] for c in self.partner2])\n dist_y = np.array([c.centroid[0] for c in self.partner1]) - \\\n np.array([c.centroid[0] for c in self.partner2])\n return np.sqrt(dist_x**2 + dist_y**2)", "def _compute_grid_region_scores(self, saliency_map: torch.Tensor) -> torch.Tensor:\n return self._creator.tensor(\n [self._compute_region_score(saliency_map, self._grid_region_from_index(saliency_map, i))\n for i in range(self.n_grid_squares(saliency_map))])", "def calculateSimilarity(self, v1, v2):\n pass", "def getSimilarities(cls, start_date, end_date):\n start_date_at_first_hour = DateUtils.date_at_first_hour(start_date)\n end_date_at_first_hour = DateUtils.date_at_first_hour(end_date if end_date else datetime.today())\n return SimilarityDAO().find(start_date_at_first_hour, end_date_at_first_hour)", "def compute_match_score(row,col,queryChar,targetChar,align_matrix,score_matrix):\n current_score = score_matrix[(queryChar,targetChar)]\n prev_score = align_matrix[row-1][col-1][0] #score is 1rst item in cell tuple\n # we get to assume that this is always filled in, because we initialize the\n # 0th row and column and always fill L->R top->bottom\n return current_score+prev_score", "def cost_estimate(start, end):\n return euclidean_distance(start[0], start[1], end[0], end[1])", "def similarity_score(self) -> float:\n return self.__score", "def get_distance(site_coord, rg_coord):\n\n site_id = site_coord['site_id'].values[0]\n site_lat = site_coord['latitude'].values[0]\n site_lon = 
site_coord['longitude'].values[0]\n\n rg_coord['latitude'] = rg_coord['latitude'].apply(lambda x: float(x))\n rg_coord['longitude'] = rg_coord['longitude'].apply(lambda x: float(x))\n\n rg_coord['dlat'] = rg_coord['latitude'].apply(lambda x: x - site_lat)\n rg_coord['dlon'] = rg_coord['longitude'].apply(lambda x: x - site_lon)\n rg_coord['dlat'] = np.radians(rg_coord.dlat)\n rg_coord['dlon'] = np.radians(rg_coord.dlon)\n\n rg_coord['a1'] = rg_coord['dlat'].apply(lambda x: np.sin(x/2)**2)\n rg_coord['a3'] = rg_coord['latitude'].apply(lambda x: np.cos(np.radians(x)))\n rg_coord['a4'] = rg_coord['dlon'].apply(lambda x: np.sin(x/2)**2)\n \n rg_coord['a'] = rg_coord['a1'] + (np.cos(np.radians(site_lat)) * \\\n rg_coord['a3'] * rg_coord['a4'])\n rg_coord['c']= 2 * np.arctan2(np.sqrt(rg_coord.a),np.sqrt(1-rg_coord.a))\n rg_coord['distance']= 6371 * rg_coord.c\n rg_coord = rg_coord.sort_values('distance', ascending = True)\n \n nearest_rg = rg_coord[0:4]\n nearest_rg['site_id'] = site_id\n nearest_rg = nearest_rg[['site_id', 'rain_id', 'distance']]\n \n return nearest_rg", "def ConditionFunction_Overlap(lon,lat,lonmin,lonmax,latmin,latmax):\n if (lon>=lonmin or lon<=lonmax) and lat>=latmin and lat<=latmax:\n return 1.0\n else:\n return 0.0", "def get_score(self, a, b):\n\t\treturn self.match_matrix[(a,b)]", "def calculate_score(s1, s2, l1, l2, startpoint):\n\n matched = \"\" # to hold string displaying alignements\n score = 0\n\n\n for i in range(l2):\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\"\n score = score + 1\n else:\n matched = matched + \"-\"\n\n # some formatted output\n print(\".\" * startpoint + matched) \n print(\".\" * startpoint + s2)\n print(s1)\n print(score) \n print(\" \")\n\n return score, matched", "def _location_match(self, loc):\n preprocessed_loc = {\"lat\": loc['lat'], \"long\": loc['long'],\n \"start_time\": unix_time_from_iso(loc['start_time']),\n \"end_time\": unix_time_from_iso(loc['end_time'])}\n return any(Client.close_to(pastloc, preprocessed_loc, self.safe_distance) and Client.time_overlaps(pastloc, preprocessed_loc) for pastloc in self.locations)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If for any reason the parsing process returns an error, make sure that all data from a socket is removed to avoid data pollution with further parsing attempts.
def clearSocketData(self):
    if not isinstance(self.fp, socket.socket):
        # not a socket. Nothing to do here.
        return
    # Switch socket into non-blocking mode and read from it until it
    # is empty (and hence socket.error is raised):
    self.fp.setblocking(False)
    try:
        while True:
            self.fp.recv(SOCKET_BLOCK_SIZE)
    except socket.error:
        # socket has no more data, it can be considered as cleared
        pass
    finally:
        # Now set it back to blocking mode (no matter what exception):
        self.fp.setblocking(True)
[ "def _collectSocketDetails(self):\n del self.socket, self.fileno", "def worker():\r\n unprocessed=bytes()\r\n while True:\r\n try:\r\n chunk = self.socket.recv(2048)\r\n if len(chunk)==0: \r\n break\r\n else: \r\n unprocessed+=chunk \r\n result = self._parseData(unprocessed)\r\n #_parse data will return how many bytes was parse or -1 on error\r\n #we trim the pendingData buffer from the left using result as the index\r\n #if result == 0 it means no data was parsed and it will stay in unprocessed buffer until more data has arrived\r\n if result < 0:\r\n sys.stderr.write(\"TcpSocketAdapter._parseData error %d\"%result)\r\n break\r\n elif result > 0:\r\n unprocessed=unprocessed[result:]\r\n except (ConnectionAbortedError, OSError):\r\n break\r\n print(\"socket worker shutting down\")", "def on_error(self):\n self.log.info('Network error: disconnected from %s' % (self.address,))\n # Inform upstream Network of error\n self.hooks.error()\n self.socket = None\n #AsyncDelayed(self.connect, 10)()", "def distinguish_empty(self, io_method, args):\n result = io_method(*args)\n if not result:\n try:\n if self.socket.recv(1, socket.MSG_PEEK):\n # the socket has become readable in the time it took\n # to get here. raise a BlockingIOError so we can get\n # selected as readable again\n raise core.eagain()\n else:\n # the socket was actually disconnected\n raise core.EndOfStream\n except socket.error as e:\n if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):\n # if this isn't a blocking io errno, raise the legitimate\n # exception\n raise\n # a socket can't be reopened for reading if it's\n # closed/shutdown, so it's still unreadable. raise a\n # BlockingIOError to communicate this upward\n raise io.BlockingIOError(*e.args)\n return result", "def _drain(response):\n try:\n response.read()\n except socket.error:\n pass", "def drop_socket(self) -> None:\n with suppress(ZMQError):\n if self.socket and not self.socket.closed:\n self.socket.close(0)\n self.socket = None", "def test_01_server_reply_unparseable_reply(self):\n self.fake_sfile.reply_buf = ['not even remotely parseable\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)", "def socket_exception(func):\n\n def read(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except socket.error:\n logger.debug('ignoring socket exception', exc_info=True)\n self.close()\n return read", "def discard_unread_input(self):\n\n\t\tremainder = self.__nread % tarfile.RECORDSIZE\n\t\tif remainder > 0:\n\t\t\tself.__sk.recv(tarfile.RECORDSIZE - remainder, socket.MSG_WAITALL)\n\t\t\tself.__nread += tarfile.RECORDSIZE - remainder", "def readexactly(sock, numbytes):\n bytes_received = b\"\"\n count = 0\n while count < numbytes:\n byte = sock.recv(1)\n if byte:\n count += 1\n bytes_received += byte\n else:\n raise asyncio.streams.IncompleteReadError(bytes_received, numbytes-count)\n\n return bytes_received", "def test_01_read_server_early_eof(self):\n self.fake_sfile.set_reply_buf(['line 1\\r\\n', '', \"Shouldn't read\\r\\n\"])\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._read_server, False)", "def _checkError(err):\n if bool(err): # Not an empty null-terminated string\n message = ctypes.string_at(err).decode(\"utf-8\")\n ldb.leveldb_free(ctypes.cast(err, ctypes.c_void_p))\n raise LevelDBException(message)", "def _consumeLength(self):\n lengthMatch = self._LENGTH.match(self._remainingData)\n if not lengthMatch:\n self._checkPartialLengthSpecification()\n raise IncompleteNetstring()\n 
self._processLength(lengthMatch)", "def error_received(self, exc): # pragma: no cover\n Log.error(\"datagram connection error [{}]\", exc)", "def socksservererror(self) :\n\t\ttry :\n\t\t\treturn self._socksservererror\n\t\texcept Exception as e:\n\t\t\traise e", "def connectionLost(self):\n del self.lines", "async def test_busy_loading_disconnects_socket(self, r):\n with pytest.raises(redis.BusyLoadingError):\n await r.execute_command(\"DEBUG\", \"ERROR\", \"LOADING fake message\")\n if r.connection:\n assert not r.connection._reader", "def _consumePayload(self):\n self._extractPayload()\n if self._currentPayloadSize < self._expectedPayloadSize:\n raise IncompleteNetstring()\n self._checkForTrailingComma()\n self._state = self._PARSING_LENGTH\n self._processPayload()", "def testReadInterrupted(self):\n for version in [4, 5, 6]:\n self.IncomingConnection(version, tcp_test.TCP_ESTABLISHED, self.netid)\n self.CloseDuringBlockingCall(self.accepted, lambda sock: sock.recv(4096),\n ECONNABORTED)\n # Writing returns EPIPE, and reading returns EOF.\n self.assertRaisesErrno(EPIPE, self.accepted.send, \"foo\")\n self.assertEquals(\"\", self.accepted.recv(4096))\n self.assertEquals(\"\", self.accepted.recv(4096))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A boolean array consists of a 4-byte word (i.e. integer) determining the number of boolean values in the following dataLength-4 bytes.
def xt_array_bool(self, lexeme):
    numBools = self.__unpack(XT_INT, 1)[0]
    # read the actual boolean values, including padding bytes:
    raw = self.read(lexeme.dataLength - 4)
    # Check if the array contains any NA values (encoded as \x02).
    # If so we need to convert the 2's to None's and use a numpy
    # array of type Object otherwise numpy will cast the None's into False's.
    # This is handled for us for numeric types since numpy can use it's own
    # nan type, but here we need to help it out.
    if 2 in raw:
        data = numpy.frombuffer(raw[:numBools], dtype=numpy.int8).astype(object)
        data[data == 2] = None
    else:
        data = numpy.frombuffer(raw[:numBools], dtype=numpyMap[lexeme.rTypeCode])
    return data
[ "def _rand_bool_array(length):\n return [NTSimulation._rand_bool() for _ in range(length)]", "def to_bool_list(bytes_array):\n ba = []\n index = 1\n for byte in bytes_array:\n for bit in range(7):\n if byte & 1 << bit:\n ba.append(index)\n index += 1\n return ba", "def _asbool(data):\n if data.dtype.itemsize == 1:\n return data.view(np.bool_)\n else:\n return data.astype(np.bool_)", "def encode_boolean_array(value):\n if not isinstance(value, list):\n raise TypeError(\"value is not an array\")\n buff = bytearray()\n buff.extend(varint.encode_unsigned(len(value)))\n for elem in value:\n if not isinstance(elem, bool):\n raise TypeError(\"array element is not a boolean\")\n buff.extend(encode_boolean(elem))\n return buff", "def find_pivots(bool_array):\r\n idx = 0\r\n count = 0\r\n while idx < len(bool_array)-1:\r\n falses = np.where(bool_array[idx:] == False)\r\n if len(falses[0]) == 0:\r\n break\r\n else:\r\n count += 1\r\n idx += falses[0][0]\r\n trues = np.where(bool_array[idx:] == True)\r\n if len(trues[0]) == 0:\r\n break\r\n else:\r\n count += 1\r\n idx += trues[0][0]\r\n return count", "def read_booleans(fileobj) :\n line = fileobj.readline().strip()\n if line :\n return True,np.fromstring(line,dtype=np.uint8)\n else :\n return False,[]", "def n_true(self):\n return np.sum(self.pixels)", "def testReadFileObjectBoolean(self):\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n\n definitions_file = self._GetTestFilePath([u'boolean.yaml'])\n with open(definitions_file, 'rb') as file_object:\n definitions_reader.ReadFileObject(definitions_registry, file_object)\n\n self.assertEqual(len(definitions_registry._definitions), 1)\n\n data_type_definition = definitions_registry.GetDefinitionByName(u'bool')\n self.assertIsInstance(data_type_definition, data_types.BooleanDefinition)\n self.assertEqual(data_type_definition.name, u'bool')\n self.assertEqual(data_type_definition.size, 1)\n self.assertEqual(data_type_definition.units, u'bytes')\n\n byte_size = data_type_definition.GetByteSize()\n self.assertEqual(byte_size, 1)", "def __len__(self):\n return self._bits", "def serve_boolean_array(self, table=None, key=None, value=None,\n length=10, interval=0, listen=False):\n return self._serve(table, key, value, interval, listen, \"setBooleanArray\",\n partial(self._rand_bool_array, length))", "def bool_refpts(refpts, n_elements):\n if isinstance(refpts, numpy.ndarray) and refpts.dtype == bool:\n diff = 1 + n_elements - refpts.size\n if diff == 0:\n return refpts\n elif diff > 0:\n return numpy.append(refpts, numpy.zeros(diff, dtype=bool))\n else:\n return refpts[:n_elements+1]\n else:\n brefpts = numpy.zeros(n_elements + 1, dtype=bool)\n brefpts[refpts] = True\n return brefpts", "def binarize(a: np.array) -> np.array:\n\n return a.astype(bool).astype(int)", "def create_boolean_groups(x, max_size, cnf_size):\n\n # strip the 0b hex header and pad with 0's\n raw_binary = bin(x)[2:].zfill(max_size) \n # map 0's and 1's to booleans\n booleans = [{'0': False, '1':True}[b] for b in raw_binary] \n # split booleans list into tuples of cnf size\n binary_groups = group_split(booleans, cnf_size) \n return binary_groups", "def get(self, ind):\n\n # Calculate the physical position of the bit in the Boolarray\n real_ind = ind // 8\n bitvec_ind = ind % 8\n\n # Return False if array does not reach unto real_ind\n if real_ind >= len(self.intarray):\n return False\n\n return 0 != self.intarray[real_ind] & 2**bitvec_ind\n # Returns a 
Boolean value", "def boolToBytes(v):\n return 0x01 if v else 0x00", "def write_boolean(self, datum):\n if datum:\n self.write(six.int2byte(1))\n else:\n self.write(six.int2byte(0))", "def readBinaryArray(self, *args) -> \"SbBool\":\n return _coin.SoInput_readBinaryArray(self, *args)", "def bitness():\n pass", "def testReadBooleanDataTypeDefinition(self):\n definition_values = {\n u'aliases': [u'BOOL'],\n u'attributes': {\n u'size': 4,\n },\n u'description': u'32-bit boolean type',\n }\n\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.DataTypeDefinitionsFileReader()\n\n data_type_definition = definitions_reader._ReadBooleanDataTypeDefinition(\n definitions_registry, definition_values, u'bool')\n self.assertIsNotNone(data_type_definition)\n self.assertIsInstance(data_type_definition, data_types.BooleanDefinition)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An array of one or more null-terminated strings. The XT_ARRAY_STR can contain trailing chars \x01 which need to be chopped off. Since strings are encoded as bytes (in Py3) they need to be converted into real strings.
def xt_array_str(self, lexeme):
    if lexeme.dataLength == 0:
        return ''
    raw = self.read(lexeme.dataLength)
    bytesStrList = raw.split(b'\0')[:-1]
    strList = [stringEncode(byteString) for byteString in bytesStrList]
    return numpy.array(strList)
[ "def s_xt_array_str(self, o):\n startPos = self._buffer.tell()\n rTypeCode = self.__s_write_xt_array_tag_data(o)\n\n # reshape into 1d array:\n o1d = o.reshape(o.size, order='F')\n # Byte-encode them:\n bo = [byteEncode(d) for d in o1d]\n # add empty string to that the following join with \\0 adds an\n # additional zero at the end of the last string!\n bo.append(b'')\n # Concatenate them as null-terminated strings:\n nullTerminatedStrings = b'\\0'.join(bo)\n\n padLength = padLen4(nullTerminatedStrings)\n self._buffer.write(nullTerminatedStrings)\n self._buffer.write(b'\\1\\1\\1\\1'[:padLength])\n\n # Update the array header:\n self.__s_update_xt_array_header(startPos, rTypeCode)", "def _build_c_string_array(values):\n arr = (ctypes.c_char_p * len(values))()\n arr[:] = values\n\n return arr", "def extend_array(array):\n for row in array:\n while len(row) < 6:\n row.append('')\n while len(array) < 4:\n array.append(['', '', '', '', '', ''])\n return array", "def _struct_string_field_with_nulls(array: pa.StructArray, name: str) -> pa.Array:\n assert array.offset == 0\n field = array.field(name)\n _, value_offsets, data = field.buffers()\n return pa.StringArray.from_buffers(\n len(array), value_offsets, data, array.buffers()[0], array.null_count, 0\n )", "def empty_data(cls, array, new_array):\n for data_value in array:\n if data_value is None:\n data_value = \"None\"\n elif data_value == \"\":\n data_value = \"Empty\"\n new_array.append(data_value.replace(\"'\", \"\"))", "def no_emptylines(array):\n new_array = []\n for a in array:\n if a != '\\n':\n new_array.append(a)\n return new_array", "def blobStrList(strs):\n b = BuildyBytes()\n for s in strs:\n b.addData(s.encode(\"utf-8\"))\n return b.b", "def _jsarr(x):\n return \"[\" + \", \".join(['\"{}\"'.format(i) for i in x]) + \"]\"", "def setStringArray(self, strings: 'char const *[]') -> \"void\":\n return _coin.SoInput_setStringArray(self, strings)", "def str2array(_array_str):\n return numpy.fromstring(_array_str, sep=' ')", "def test01b_String(self):\n\n root = self.rootgroup\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test01b_String...\" % self.__class__.__name__)\n\n shape = (3, 2, 2)\n\n # Create an string atom\n carray = self.h5file.create_carray(root, 'strings',\n atom=tb.StringAtom(itemsize=3),\n shape=shape,\n title=\"Array of strings\",\n chunkshape=(1, 2, 2))\n a = np.array([[[\"a\", \"b\"], [\"123\", \"45\"], [\"45\", \"123\"]]], dtype=\"S3\")\n carray[0] = a[0, ::2]\n a = np.array([[[\"s\", \"a\"], [\"ab\", \"f\"], [\"s\", \"abc\"], [\"abc\", \"f\"]]])\n carray[1] = a[0, ::2]\n\n # Read all the rows:\n data = carray.read()\n if common.verbose:\n print(\"Object read:\", data)\n print(\"Nrows in\", carray._v_pathname, \":\", carray.nrows)\n print(\"Second row in carray ==>\", data[1].tolist())\n\n self.assertEqual(carray.nrows, 3)\n self.assertEqual(data[0].tolist(), [[b\"a\", b\"b\"], [b\"45\", b\"123\"]])\n self.assertEqual(data[1].tolist(), [[b\"s\", b\"a\"], [b\"s\", b\"abc\"]])\n self.assertEqual(len(data[0]), 2)\n self.assertEqual(len(data[1]), 2)", "def test01a_String(self):\n\n root = self.rootgroup\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test01a_String...\" % self.__class__.__name__)\n\n shape = (3, 2, 2)\n # Create an string atom\n carray = self.h5file.create_carray(root, 'strings',\n atom=tb.StringAtom(itemsize=3),\n shape=shape,\n title=\"Array of strings\",\n chunkshape=(1, 2, 2))\n a = np.array([[[\"a\", \"b\"], [\"123\", \"45\"], [\"45\", \"123\"]]], 
dtype=\"S3\")\n carray[0] = a[0, 1:]\n a = np.array([[[\"s\", \"a\"], [\"ab\", \"f\"], [\"s\", \"abc\"], [\"abc\", \"f\"]]])\n carray[1] = a[0, 2:]\n\n # Read all the data:\n data = carray.read()\n if common.verbose:\n print(\"Object read:\", data)\n print(\"Nrows in\", carray._v_pathname, \":\", carray.nrows)\n print(\"Second row in carray ==>\", data[1].tolist())\n\n self.assertEqual(carray.nrows, 3)\n self.assertEqual(data[0].tolist(), [[b\"123\", b\"45\"], [b\"45\", b\"123\"]])\n self.assertEqual(data[1].tolist(), [[b\"s\", b\"abc\"], [b\"abc\", b\"f\"]])\n self.assertEqual(len(data[0]), 2)\n self.assertEqual(len(data[1]), 2)", "def parse_array_string_initializer(self, typ):\n # isinstance(initializer, expressions.StringLiteral):\n string = self.consume(\"STRING\")\n # Turn into sequence of characters:\n il = []\n location = string.loc\n for c in string.val:\n il.append(\n expressions.CharLiteral(\n ord(c), self.semantics.char_type, location\n )\n )\n il.append(\n expressions.CharLiteral(0, self.semantics.char_type, location)\n )\n initializer = expressions.ArrayInitializer(typ, il, location)\n return initializer", "def test_empty_bytestring(self):\n bytestring = b''\n assert convert_ATvalue(bytestring, True) == []", "def getArrayPtr(self) -> \"SbString const **\":\n return _coin.SbStringList_getArrayPtr(self)", "def test_utf8_bytes_in_an_array(self):\r\n # Python3 doesn't support bytestrings, don't run this test\r\n if str is unicode:\r\n return\r\n input = \"A r\\xc3\\xa9sum\\xc3\\xa9, also spelled resum\\xc3\\xa9 or resume\"\r\n output = input.split(\" \")\r\n output[1] = output[1][0:-1]\r\n input = array.array('c',input)\r\n output = [array.array('c',w) for w in output]\r\n for (itmO,itmV) in zip(output,tokenize_en(array.array('c',input))):\r\n self.assertEqual(itmO,itmV[0])\r\n self.assertEqual(input[itmV[1]:itmV[1]+len(itmV[0])],itmO)", "def com_arr_str(x,y):\n \n return [var for var in x if var in y and not var in ('[',']',',',\"'\",' ')]", "def bytes_array_to_native_str_object_array(a):\n return a.astype(str).astype(object)", "def ensure_unicode(segments):\n # not relevant in python 3.x\n # \"\"\"\n # >>> segments = ['Hi there!', 'My name is Peter.']\n # >>> ensure_unicode(segments)\n # ['Hi there!', 'My name is Peter.']\n # \"\"\"\n return [str(s) for s in segments]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the identifier of the recipe. For recipes, the name is the identifier.
def id(self):
    return self.recipe_name
[ "def identifier(self) -> str:\n if not self._identifier:\n self.load()\n return self._identifier", "def _get_id(self) -> \"std::string\" :\n return _core.Material__get_id(self)", "def get_recipe_by_name(self, name):\n pass", "def get_id(self) -> str:\r\n return self.resource_id", "def _id_from_name(resource_config, resources, typename):\n return obj_from_name(resource_config, resources, typename).id", "def get_id(self, name):\n try:\n return self.d[name.replace(' ', '_')]\n except KeyError:\n return None", "def _extract_id(self):\n if self.metadata:\n return self.metadata.get(\n self.__class__.__name__.title() + 'Id',\n None\n )\n else:\n return None", "def read_identifier(self, line):\r\n m = re.match(IDENTIFIER_REG, line)\r\n if m is None:\r\n return \"\"\r\n else:\r\n return m.group(0)", "def _get_identifier(model):\n pass", "def get_id(self):\n return self.data['id']", "def get_line_identifier(self):", "def _get_id(self) -> \"std::string\" :\n return _core.CommandDefinition__get_id(self)", "def get_recipe_by_name(self, name):\n for _, recipe in self.recipe_list.items():\n if recipe.name == name:\n print(recipe)\n return recipe", "def resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_id\")", "def get_source_identifier():", "def extract_id(cls, e):\n return e[cls.id_key]", "def identifier(self):\n r = Vulnerability.__IDENTIFIER__.format(\n name=self.name,\n description=self.description,\n cwe=self.cwe,\n path=self.path,\n cvss=self.cvss,\n criticality=self.criticality\n )\n return r", "def get_rubric_id(self):\n return # osid.id.Id", "def study_name_to_id(self, name, dry_run=False):\n if self.verbose:\n print(\"STUDY NAME TO ID\")\n if dry_run:\n print(\"DRY_RUN:: returning a dummy id\")\n return \"1\"\n else:\n return self.name_to_id.get(name, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the name of the recipe.
def name(self):
    return self.recipe_name
[ "def id(self):\n return self.recipe_name", "def bioc_name(recipe):\n return MetaData(recipe).meta['source']['fn'].split('_')[0]", "def get_name(self):\n return self.__name_army", "def _get_name(self):\n return self._trend['name']", "def get_recipe_by_name(self, name):\n pass", "def get_resource_name(self):\n return self.current_item[\"name\"] + \"Resource\"", "def get_name_item(self):\n return self.name_item", "def _get_name(self) -> \"std::string\" :\n return _core.CommandDefinition__get_name(self)", "def _get_name(self) -> \"std::string\" :\n return _core.Material__get_name(self)", "def recipe_names(self):\n return self._recipe_names", "def name(self):\n return self._pr.title", "def get_recipe_by_name(self, name):\n for _, recipe in self.recipe_list.items():\n if recipe.name == name:\n print(recipe)\n return recipe", "def _get_name(self) -> \"std::string\" :\n return _core.ListItem__get_name(self)", "def get_name(self):\r\n return self.__nombre", "def get_name():\n return _(strings.bot_title)", "def _get_name(self) -> \"std::string\" :\n return _core.ControlDefinition__get_name(self)", "def item_name(context: dict) -> str:\n return os.path.basename(context['item_path'])", "def _get_name(self) -> \"std::string\" :\n return _core.TextCommandPalette__get_name(self)", "def get_name() -> str:\n package_name = os.path.basename(PACKAGE_DIR)\n return package_name", "def _get_name(self) -> \"std::string\" :\n return _core.Workspace__get_name(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the definition of the recipe.
def set_definition_and_payload(self, definition):
    warnings.warn("Recipe.set_definition_and_payload is deprecated, please use get_settings", DeprecationWarning)
    definition._payload_to_str()
    return self.client._perform_json(
        "PUT", "/projects/%s/recipes/%s" % (self.project_key, self.recipe_name),
        body=definition.data)
[ "async def pboss_edit(self, ctx, term, *, definition):\n await self._pboss_add(ctx, term, definition, False)", "def _configure_using_fluent_definition(self):\r\n definition = Parser.parse(self.signature)\r\n\r\n self._config.set_name(definition[\"name\"])\r\n\r\n for name, flags, description, default in definition[\"arguments\"]:\r\n self._config.add_argument(name, flags, description, default)\r\n\r\n for long_name, short_name, flags, description, default in definition[\"options\"]:\r\n self._config.add_option(long_name, short_name, flags, description, default)", "def define(self,_):\n pass", "def mark_def (self, var):\n self._defs.add (var)", "async def pglossary_edit(self, ctx, term, *, definition):\n await self._pglossary_add(ctx, term, definition, False)", "async def dungeon_edit(self, ctx, term: str, *, definition: str):\n await self._dungeon_add(ctx, term, definition, False)", "def set_recipe(self, recipe: object) -> None:\n for i in range(len(self.ingredient_list)):\n self.ingredient_list[i].set_component_recipe(recipe)", "def set_role_definition(\n self, scope: \"Union[str, KeyVaultRoleScope]\", **kwargs\n ) -> \"KeyVaultRoleDefinition\":\n permissions = [\n self._client.role_definitions.models.Permission(\n actions=p.actions,\n not_actions=p.not_actions,\n data_actions=p.data_actions,\n not_data_actions=p.not_data_actions,\n )\n for p in kwargs.pop(\"permissions\", None) or []\n ]\n\n properties = self._client.role_definitions.models.RoleDefinitionProperties(\n role_name=kwargs.pop(\"role_name\", None),\n description=kwargs.pop(\"description\", None),\n permissions=permissions,\n assignable_scopes=kwargs.pop(\"assignable_scopes\", None),\n )\n parameters = self._client.role_definitions.models.RoleDefinitionCreateParameters(properties=properties)\n\n definition = self._client.role_definitions.create_or_update(\n vault_base_url=self._vault_url,\n scope=scope,\n role_definition_name=str(kwargs.pop(\"name\", None) or uuid4()),\n parameters=parameters,\n **kwargs\n )\n return KeyVaultRoleDefinition._from_generated(definition)", "def add_recipe(self, recipe):\n pass", "def definition(self, name):\n def decorator(schema_cls, **kwargs):\n self.spec.definition(name, schema=schema_cls, **kwargs)\n return schema_cls\n return decorator", "def fit_recipe(self):\n pass", "def make_recipe(self, recipe: str) -> str:\n return f\"\"\"make PLATFORM={self.PLATFORM} TARGET_PROJECT={self.TARGET_PROJECT} DESIGN={self.DESIGN} TARGET_CONFIG={self.TARGET_CONFIG} PLATFORM_CONFIG={self.PLATFORM_CONFIG} {recipe}\"\"\"", "def set_component_recipe(self, recipe: object):\n for i in range(len(self.components)):\n self.components[i].recipe = recipe", "def AddDefrule(self, dr):\n if isinstance(dr, BLNlpClipsDefruleMapping):\n self._defrules.append(dr)", "async def define( self, ctx, *args ):\n try:\n query = ' '.join( args )\n if query in config.DEFINITIONS.keys():\n await ctx.send( config.DEFINITIONS[query.lower()] )\n else:\n await ctx.send( \"Term not found.\" )\n except Exception as e:\n print(e)", "def dynskel_defs(self, stream, visitor):\n pass", "async def define(self, word: str):\n api_key = \"e02fb0b8-5f3e-4d5c-b868-87dd7de88974\"\n\n # Checks for mutliple words and only uses first\n if \" \" in word:\n word = word.split(\" \")[0]\n\n url = \"http://www.dictionaryapi.com/api/v1/references/collegiate/xml/{}?key={}\".format(word.lower(), api_key)\n\n response = requests.get(url)\n results = ElementTree.fromstring(response.text)\n\n \"\"\"\n Tag descriptions:\n\n entry_list - root\n entry - ( ͡° ͜ʖ ͡°)\n fl 
- word type\n def - contains date and definitions\n dt - sub tag of def, contains definitions\n\n suggestion - returns if the word can't be found\n \"\"\"\n\n suggestions = []\n\n for entry in islice(results, 0, 3):\n # Add suggestions to list if the word isn't found\n if entry.tag == \"suggestion\":\n suggestions.append(entry.text)\n continue\n word = entry.find(\"ew\").text\n word_type = entry.find(\"fl\").text\n word_def = entry.find(\"def\").find(\"dt\").text\n\n try:\n # First definition sometimes returns blank results for some\n # reason, skipping to the next description tag fixes it.\n if word_def == \":\":\n word_def = entry.find(\"def\").findall(\"dt\")[1].text\n\n await self.bot.say(\"**{}**\\n*{}*\\n{}\".format(\n word, word_type, word_def)\n )\n except IndexError:\n continue\n\n if suggestions:\n await self.bot.say(\n \"That's not a word, maybe you meant: {}\".format(\n \", \".join(suggestions)\n )\n )", "def set_factory(self, thing: type, value, overwrite=False):\n if thing in self.factories and not overwrite:\n raise DiayException(\"factory for %r already exists\" % thing)\n self.factories[thing] = value", "def skel_defs(self, stream, visitor):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the status of this recipe. The status of a recipe is made of messages from checks performed by DSS on the recipe, of messages related to engines availability for the recipe, of messages about testing the recipe on the engine, ...
def get_status(self):
    data = self.client._perform_json(
        "GET", "/projects/%s/recipes/%s/status" % (self.project_key, self.recipe_name))
    return DSSRecipeStatus(self.client, data)
[ "def get_status (self):\n return self.__status", "def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)", "def get_status(self):\n return StatusAPI.from_client(self)", "def GetStatus(self):\n self.__SendMsg(\"status\")\n ##TODO: Parse the response into some struct so it can be queried later.\n\n ## \"Status\" is the only command that returns a multi\n ## line response so handle it separately.\n response = \"\"\n while(self.SocketIsReadable()):\n data = self.my_Socket.recv(1)\n if not data:\n break\n else:\n response += data.decode(\"UTF-8\")\n return response", "def readStatus(message):", "def status(self):\n return self._status_category.get(\"key\")", "def get_status(self):\n\n return get_repo_text(self._get_repo(), 'status')", "def status(self, result, config=None):\r\n return result['status']", "def status(self):\n return self.proto_wo_data.header.status", "def status(self) -> 'outputs.UpdateRunStatusResponse':\n return pulumi.get(self, \"status\")", "def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()", "def status(self):\n if self.failures > 0:\n return \"partial\"\n else:\n return \"success\"", "def Status(con, category, message):", "def status(self) -> 'outputs.JobStatusResponse':\n return pulumi.get(self, \"status\")", "def check_status(self):\n self.logger.debug('Server - td-agent-bit - check_status call.')\n self.change_service_status(\"status\")\n return self.status", "def status_message(self) -> str:\n\n # okay to copy since we're not modifying the components\n successful_emoji = list(filter(lambda x: not x.failed and x.status, self.__emojis))\n failed_emoji = list(filter(lambda x: x.failed and x.status, self.__emojis))\n\n status = \"\"\n\n if successful_emoji:\n status += \"**Successful Yoinks:**\\n\" + '\\n'.join(x.status for x in successful_emoji)\n\n if failed_emoji:\n status += \"\\n\\n**Failed Yoinks:**\\n\" + '\\n'.join(x.status for x in failed_emoji)\n\n if not status:\n status = 'Failed to generate status message'\n bot_logger.error(\n f'Failed to Generate Yoink Status Message.\\n'\n f'Details: Guild={self.__guild.id}, EmojiComponents={self.__emojis}'\n )\n\n return status", "def status(self):\n \n return self._make_request(\"server/status\").json()", "def get_status(self):\n if self.running_arping:\n return \"Arping\"\n else:\n if self._online:\n return \"Online\"\n else:\n return \"Offline\"", "def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')", "def status(self):\n return Status.compute_status([t.status for t in self.tasks])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the metadata attached to this recipe. The metadata contains label, description checklists, tags and custom metadata of the recipe
def get_metadata(self):
    return self.client._perform_json(
        "GET", "/projects/%s/recipes/%s/metadata" % (self.project_key, self.recipe_name))
[ "def get_metadata(self):\n return meta.get_metadata(self.ast)", "def parse_metadata(self, recipe=None):\n if recipe:\n self.prepare_taskdata([recipe])\n filename = self.provide_to_fn(recipe)\n return self.parse_recipe_file(filename)\n else:\n return self.localdata", "def get_rubric_metadata(self):\n return # osid.Metadata", "def metadata(self) -> dict[str, Any]:", "def get_metadata(self):\r\n return self.manager.get_metadata(self, node=self)", "def metadata(self):\n return self.Model.metadata", "def ex_get_metadata_for_node(self, node):\r\n return node.extra['metadata']", "def metadata(self):\r\n metadataurlpath = 'content/items/' + self.itemid + '/info/metadata/metadata.xml'\r\n try:\r\n return self._portal.con.get(metadataurlpath, try_json=False)\r\n\r\n # If the get operation returns a 400 HTTP Error then the metadata simply\r\n # doesn't exist, let's just return None in this case\r\n except HTTPError as e:\r\n if e.code == 400 or e.code == 500:\r\n return None\r\n else:\r\n raise e", "def describe(self):\n attributes = {\n 'client': 'Metahelper',\n 'sessionId': self._session.get_session_id(),\n 'apiVersion': self._session.get_api_version()\n }\n\n request = msg.DESCRIBE_METADATA_MSG.format(**attributes)\n\n headers = {'Content-type': 'text/xml', 'SOAPAction': 'describeMetadata'}\n res = self._session.post(self._get_api_url(), headers=headers, data=request)\n if res.status_code != 200:\n raise Exception(\n \"Request failed with %d code and error [%s]\" %\n (res.status_code, res.text))\n\n root = ET.fromstring(res.text)\n metadata_objects = root.find(\n 'soapenv:Body/mt:describeMetadataResponse/mt:result',\n self._XML_NAMESPACES)\n if metadata_objects is None:\n raise Exception(\"Result node could not be found: %s\" % res.text)\n metadata_objects_list = []\n for metadata_object in metadata_objects:\n directory_name = metadata_object.find('mt:directoryName', self._XML_NAMESPACES)\n in_folder = metadata_object.find('mt:inFolder', self._XML_NAMESPACES)\n metafile = metadata_object.find('mt:metaFile', self._XML_NAMESPACES)\n suffix = metadata_object.find('mt:suffix', self._XML_NAMESPACES)\n xml_name = metadata_object.find('mt:xmlName', self._XML_NAMESPACES)\n if (\n directory_name is None and in_folder is None and metafile is None\n and suffix is None and xml_name is None\n ):\n continue\n metadata_objects_list.append({\n \"directory_name\": directory_name.text if directory_name is not None else \"\",\n \"in_folder\": in_folder.text if in_folder is not None else \"\",\n \"metafile\": metafile.text if metafile is not None else \"\",\n \"suffix\": suffix.text if suffix is not None else \"\",\n \"xml_name\": xml_name.text if xml_name is not None else \"\",\n })\n return metadata_objects_list", "def get_learning_objectives_metadata(self):\n return # osid.Metadata", "def readExistingMetaData(self: object) -> dict[str, list[str]]:\n\t\twith exiv.Image(f\"{self.rootPath}/{self.fileName}\") as f:\n\t\t\tdata = f.read_xmp()\n\t\treturn data", "def get_metadata(self, image_name: str) -> ImageMetadata:\n pass", "def metadata(self):\n return metadata_for_forecasts()", "def get_item_metadata(self, handle):\n raise(NotImplementedError())", "def resource_metadata(self):\n return self.__data[u'resource_metadata']", "def get_metadata(self, base):\n try:\n xml = ET.fromstring(self.api_request('GET', '{}/metadata'.format(base)).text)\n except NoSuchResourceError:\n return {}\n meta = {}\n for entry in xml.findall('.//vcd:MetadataEntry', _NS):\n key = entry.find('./vcd:Key', _NS).text\n value = 
entry.find('.//vcd:Value', _NS).text\n type_ = entry.find('./vcd:TypedValue', _NS).attrib[self._TYPE_KEY]\n # Try to convert the value\n try:\n if type_ == 'MetadataNumberValue':\n # Number actually means int, but the number can be in the format 10.0\n try:\n meta[key] = int(value)\n except ValueError:\n meta[key] = int(float(value))\n elif type_ == \"MetadataBooleanValue\":\n meta[key] = (value.lower() == 'true')\n elif type_ == \"MetadataDateTimeValue\":\n # Don't attempt to parse the timezone\n meta[key] = datetime.strptime(value[:19], '%Y-%m-%dT%H:%M:%S')\n else:\n meta[key] = value\n except (ValueError, TypeError):\n raise BadConfigurationError('Invalid metadata value')\n return meta", "def metadata(self) -> Mapping[str, np.ndarray]:\n return self._metadata.copy()", "def getMetadataLabels(self):\n return { \"title\": self.label }", "def GetMetadata() -> Dict[str, Any]:\n return {\n 's64da_benchmark_type': _BENCHMARK_TYPE.value,\n 's64da_schema': _SCHEMA.value,\n 's64da_scale_factor': _SCALE_FACTOR.value,\n 's64da_max_job': _MAX_JOB.value,\n 's64da-oltp-workers': _OLTP_WORKERS.value,\n 's64da-olap-workers': _OLAP_WORKERS.value,\n 's64da-duration': _DURATION.value,\n 's64da-ramp_up_duration': _RAMP_UP_DURATION.value,\n 's64da-olap-timeout': _OLAP_TIMEOUT.value,\n 's64da-run_oltp_on_replica': _RUN_OLTP_ON_REPLICA.value\n }", "def get_meta (self) :\n return self._meta" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the metadata on this recipe.
def set_metadata(self, metadata):
    return self.client._perform_json(
        "PUT", "/projects/%s/recipes/%s/metadata" % (self.project_key, self.recipe_name),
        body=metadata)
[ "def set_metadata(self, jobid, metadata):", "def set_metadata(self, metadata, clear=False):\n self.client.set_object_metadata(self.container, self, metadata, clear=clear)", "def set_metadata(self, metadata: MetaData) -> None:\n self._parent.set_metadata(metadata)\n self._child.set_metadata(metadata)", "def set_metadata(self, dict_):\n for key, value in dict_.items():\n self.config.set('metadata', key, value) \n if self.status == STATE_NEEDS_METADATA and \\\n self.config.get('metadata', 'author') and \\\n self.config.get('metadata', 'title'):\n\n self.config.set('process', STATE_NEED_PDFS)\n\n self.write_config()\n\n self._set_current_status()", "def set_Metadata(self, value):\n super(UpdateTicketInputSet, self)._set_input('Metadata', value)", "def set_metadata(self, metadata, clear=False, prefix=None):\r\n self.client.set_object_metadata(self.container, self, metadata,\r\n clear=clear, prefix=prefix)", "def add_metadata(self, metadata: Dict[str, str]) -> None:\n self.metadata.update(metadata)", "def setMetadata(self, metadata):\n if metadata is not None:\n self._metadata.update(metadata)\n\n # Ensure that we have the obs type required by calibration ingest\n self._metadata[\"OBSTYPE\"] = self._OBSTYPE\n self._metadata[self._OBSTYPE + \"_SCHEMA\"] = self._SCHEMA\n self._metadata[self._OBSTYPE + \"_VERSION\"] = self._VERSION\n\n if isinstance(metadata, dict):\n self.calibInfoFromDict(metadata)\n elif isinstance(metadata, PropertyList):\n self.calibInfoFromDict(metadata.toDict())", "def set_meta( self, dataset, **kwd ):\n data_lines = 0\n for line in file( dataset.file_name ):\n line = line.strip()\n if line and not line.startswith( '#' ):\n data_lines += 1\n dataset.metadata.data_lines = data_lines", "def change_instance_metadata(self, *args, **kwargs):\n pass", "def set_metadata(self, new_metadata):\n try:\n self.ast[0]['unMeta'] = new_metadata\n except (IndexError, KeyError):\n self.ast = [{'unMeta': new_metadata}, []]", "def set_metadata(*, name=APP_NAME, author=APP_AUTHOR, version=APP_VERSION):\n global APP_NAME, APP_VERSION, APP_AUTHOR, cli\n\n if cli._inside_context_manager:\n raise RuntimeError('You must run set_metadata() before cli()!')\n\n APP_NAME = name\n APP_VERSION = version\n APP_AUTHOR = author\n cli = MILC(name, version, author)", "def set_metadata_for_node(self, node, metadata):\r\n return self.manager.set_metadata(self, metadata, node=node)", "def set_metadata(self, loadbalancer, metadata):\r\n return loadbalancer.set_metadata(metadata)", "def set_metadata(self, metadata, clear=False, prefix=None):\r\n return self.client.set_container_metadata(self, metadata, clear=clear,\r\n prefix=prefix)", "def setMetadata(self, collection, metadata, allowNull=False):\n if 'meta' not in collection:\n collection['meta'] = {}\n\n # Add new metadata to existing metadata\n collection['meta'].update(metadata.items())\n\n # Remove metadata fields that were set to null (use items in py3)\n if not allowNull:\n toDelete = [k for k, v in metadata.items() if v is None]\n for key in toDelete:\n del collection['meta'][key]\n\n self.validateKeys(collection['meta'])\n\n collection['updated'] = datetime.datetime.utcnow()\n\n # Validate and save the collection\n return self.save(collection)", "def setMetadata(self, type, metadata, key, uri, flags=0):\n ret = libvirtmod.virDomainSetMetadata(self._o, type, metadata, key, uri, flags)\n if ret == -1: raise libvirtError ('virDomainSetMetadata() failed', dom=self)\n return ret", "def set_meta(the_vm, meta_data):\n expected = {'component', 'created', 
'version', 'generation', 'configured'}\n provided = set(meta_data.keys())\n if not expected == provided:\n error = \"Invalid meta data schema. Supplied: {}, Required: {}\".format(provided, expected)\n raise ValueError(error)\n spec = vim.vm.ConfigSpec()\n spec_info = ujson.dumps(meta_data)\n spec.annotation = spec_info\n task = the_vm.ReconfigVM_Task(spec)\n consume_task(task)", "def add_metadata(self, metadata):\n\n self.add_node(metadata, type='metadata')", "def ex_set_object_metadata(self, obj, meta_data):\r\n object_path = self._get_object_path(obj.container, obj.name)\r\n params = {'comp': 'metadata'}\r\n headers = {}\r\n\r\n self._update_metadata(headers, meta_data)\r\n\r\n response = self.connection.request(object_path, method='PUT',\r\n params=params,\r\n headers=headers)\r\n\r\n if response.status != httplib.OK:\r\n response.parse_error('Setting metadata')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a handle to manage discussions on the recipe.
def get_object_discussions(self):
    return DSSObjectDiscussions(self.client, self.project_key, "RECIPE", self.recipe_name)
[ "def discussion(self, disscussion_id, text_format=None):\n endpoint = 'discussions/{}'.format(disscussion_id)\n params = {'text_format': text_format or self.response_format}\n return self._make_request(path=endpoint, params_=params, public_api=True)", "def create_divided_discussions(self):\n divided_inline_discussions = ['Topic A']\n divided_course_wide_discussions = [\"Topic B\"]\n divided_discussions = divided_inline_discussions + divided_course_wide_discussions\n\n # inline discussion\n ItemFactory.create(\n parent_location=self.course.location,\n category=\"discussion\",\n discussion_id=topic_name_to_id(self.course, \"Topic A\"),\n discussion_category=\"Chapter\",\n discussion_target=\"Discussion\",\n start=datetime.now()\n )\n # course-wide discussion\n discussion_topics = {\n \"Topic B\": {\"id\": \"Topic B\"},\n }\n\n config_course_cohorts(\n self.course,\n is_cohorted=True,\n )\n\n config_course_discussions(\n self.course,\n discussion_topics=discussion_topics,\n divided_discussions=divided_discussions\n )\n return divided_inline_discussions, divided_course_wide_discussions", "def load_discussion(self, discussion_id: int):\n cls = self.connection.Base.classes.Discussion\n query = self.connection.session.query(cls).filter_by(discussion_id=discussion_id)\n discussion = query.scalar()\n return discussion", "def facet_discussion(self):\r\n\r\n self.object = self.get_object()\r\n discussion = self.object.discussion\r\n comments = discussion.comment_set.all().order_by('date')\r\n form = CommentForm()\r\n return {'discussion': discussion, 'comments': comments, 'form': form}", "def __init__(self, **kwargs):\n if 'owner' not in kwargs.keys():\n d = Thing('discussion')\n else:\n d = Thing('discussion', kwargs['owner'].id)\n if 'role' not in kwargs.keys():\n role = None\n else:\n role = kwargs['role']\n title = kwargs['title']\n discType = kwargs['discType']\n d['discType'] = discType\n d['disabled'] = '0'\n d['deleted'] = '0'\n d['ups'] = '0'\n d['downs'] = '0'\n d['views'] = '0'\n d['title'] = title\n d['url'] = urlify(title)\n if 'geoScope' in kwargs:\n d['scope'] = kwargs['geoScope']\n d['public'] = '1'\n if 'tags' in kwargs:\n d['tags'] = kwargs['tags']\n d['numComments'] = '0' # should instead do a count query on number of comments with parent code of this discussion\n # Optional arguments\n if 'workshop' in kwargs and kwargs['workshop'] != None:\n workshop = kwargs['workshop']\n d = generic.linkChildToParent(d, workshop)\n d = generic.addedItemAs(d, kwargs['privs'], role)\n if 'owner' in kwargs.keys():\n d = generic.linkChildToParent(d, kwargs['owner'])\n if 'text' in kwargs:\n d['text'] = kwargs['text']\n if 'attachedThing' in kwargs.keys():\n d = generic.linkChildToParent(d, kwargs['attachedThing'])\n \n if 'workshop_searchable' in d: \n if discType != 'update' and discType != 'general':\n d['workshop_searchable'] = '0'\n \n commit(d)\n \n d['urlCode'] = toBase62(d)\n commit(d)\n \n if d['discType'] == 'organization_general' or d['discType'] == 'organization_position':\n d['organization_searchable'] = '1'\n if 'position' in kwargs.keys():\n d['position'] = kwargs['position']\n if 'positions' in session:\n sPositions = session['positions']\n else:\n sPositions = {}\n urlCode = d['urlCode']\n sPositions[urlCode] = d['position']\n session['positions'] = sPositions\n session.save()\n \n commit(d)\n\n self.d = d", "def isDiscussionAllowedFor(obj):", "def createDiscussion(_urlName, _discussionCategory, _SubsectionDisplayName, _discussionID, _displayName):\n try:\n xmlfile = path + 
\"/discussion/\" + _urlName + \".xml\"\n page = etree.Element('discussion', discussion_category=_discussionCategory,\n discussion_target=_SubsectionDisplayName, discussion_id=_discussionID,\n display_name=_displayName)\n # Make a new document tree\n doc = etree.ElementTree(page)\n\n doc.write(xmlfile, pretty_print=True, xml_declaration=False, encoding='utf-8')\n except Exception, e:\n addtolog(\"log\", u\"<li id='error\" + unicode(\n len(stufftoreturn[\"error\"])) + \"'>\" + WRONGIMG + u\"Error message:\" + e.message + u\"</li>\")\n addtolog(\"error\", u\"<a href='#error\" + unicode(\n len(stufftoreturn[\"error\"])) + \"'><p>\" + WRONGIMG + u\"Error en la creación del foro</p>\")\n addtolog(\"error\", u\"<p>\" + WRONGIMG + u\"Error message:\" + e.message + u\"</p></a>\")", "def detail(id):\n\td = Discussion.objects.get_or_404(id=id)\n\tcontext = get_school_context(d)\n\tif not g.school==context:\n\t\tflash(_(\"You've been redirected to where the discussion was created.\"))\n\t\treturn redirect(url_for_school('discussions.detail', school=context, id=d.id), code=301)\n\tother_schools = [school for school in d.schools if not school==context]\n\tcomments = Comment.objects.filter(discussion=d).order_by('-created')\n\t# Passing some permissions related functions on to Jinja\n\t#current_app.jinja_env.globals['can_edit_proposal'] = can_edit_proposal\n\treturn render_template('discussion/detail.html',\n\t\ttitle = d.title,\n\t\tdiscussion = d,\n\t\tcomments = comments,\n\t\tother_schools = other_schools)", "def get_comment(self):\n return self.get_abstract_item(\"General\", \"Comment\")", "def create():\n\tschools = g.all_schools\n\tform = AddDiscussionForm()\n\tif form.validate_on_submit():\n\t\td = start_discussion(request.form.get(\"text\"), form=form)\n\t\treturn redirect(url_for('discussions.detail', id=d.id))\n\treturn render_template('discussion/create.html',\n\t\ttitle=_('Start a discussion'),\n\t\tform=form)", "def check_is_discussion_item(item):\n return (\n isinstance(item, Thread)\n or isinstance(item, Comment)\n or isinstance(item, Reply)\n )", "def get_model():\n from casepro.msg_board.models import MessageBoardComment\n\n return MessageBoardComment", "def discussion_replies(self,\n disscussion_id,\n per_page=None,\n page=None,\n text_format=None):\n endpoint = 'discussions/{}/forum_posts'.format(disscussion_id)\n params = {'per_page': per_page,\n 'page': page,\n 'text_format': text_format or self.response_format}\n return self._make_request(path=endpoint, params_=params, public_api=True)", "def get_newsitem(self):\n if self.parent.is_news_item():\n return self.parent.newsitem\n else: \n return self.parent.comment.get_newsitem()", "def get_note(self):\n cmd = self._repo._repo.git\n try:\n return cmd.notes('--ref', self.NOTE_REF, 'show', self.sha)\n except GitCommandError:\n return None", "async def markdown(self):\n return await self.wiki.http.get_markdown(self.title)", "def getId(self):\n return self.__disciplineID", "def discussion_page(request, assignment_id = None):\n assignment = get_object_or_404(Assignment, id=assignment_id)\n allocation = get_object_or_404(Allocation, student = request.user.student, assignment = assignment)\n criterias = AssignmentCriteria.objects.filter(assignment = assignment).all()\n request_student = request.user.student\n request_student_submission = Submission.objects.get(assignment = assignment, student = request_student)\n\n submissions = assignment_service.get_submissions_of_peers(assignment = assignment, allocation = allocation)\n\n content = {\n 
'user' : request.user,\n 'assignment' : assignment,\n 'peers' : {\n 1 : [],\n 2 : [],\n 3 : [],\n 4 : [],\n 5 : []\n }\n }\n\n for criteria in criterias:\n for peer_id in content['peers'].keys():\n\n peer_submission = submissions[peer_id]\n\n messages = message_service.get_messages_for(peer_submission, criteria,\\\n request.user.student)\n\n content['peers'][peer_id].append( \\\n {\"criteria\" : criteria, \\\n \"messages\": messages})\n\n # Get only URLs\n #for key in submissions:\n # submissions[key] = submissions[key].url\n\n return render_to_response(\"student/discussion.html\", \\\n {'user': request.user, \\\n 'assignment' : assignment, \\\n 'messages' : content, \\\n 'submissions' : submissions})", "def _get_discussion_styles(_helper_cfg):\n _discussion_styles = ['blog', 'contest', 'forum', 'idea', 'qanda', 'tkb']\n if 'discussion_styles' in _helper_cfg:\n if isinstance(_helper_cfg.get('discussion_styles'), list):\n _discussion_styles = _helper_cfg.get('discussion_styles')\n return _discussion_styles", "def get_parent_handle(self):\n return self.origin_handle" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move this object to a flow zone.
def move_to_zone(self, zone): if isinstance(zone, basestring): zone = self.client.get_project(self.project_key).get_flow().get_zone(zone) zone.add_item(self)
[ "def move_to(self, mobject_or_point):\n layer_center = self.feature_maps.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)", "def start_moving_to_object(self) -> None:\n\n object_position = TDWUtils.array_to_vector3(self.object_manager.transforms[self.objects[self.object_index]].position)\n object_position[\"y\"] = 0\n\n # Move to the object.\n # `arrived_at` is set to make sure that the Replicant is at a distance at which it's easy to pick up the object.\n self.replicant.move_to(target=self.objects[self.object_index], arrived_at=0.25)", "def move_zone(deck: List[Card], zone: Zone) -> None:\n for card in deck:\n card.zone = zone", "def move_to(x_coord, y_coord, state):\n\n state.zone.x = x_coord\n state.zone.y = y_coord", "def move(self):\n # spread pheromone\n self.spread()\n\n return super(DepositPheromones, self).move()", "def move(self, dest):\r\n from .server import Server\r\n server = Server(self.Server)\r\n if isinstance(dest, basestring):\r\n dest = server(dest)\r\n self.DSMove(dest)\r\n self._dsobject = server(self.Handle) # dispose cached object\r", "def _move_door(self,):\n\n pass", "def move_body(self):\n location = self.body.get_location()\n next_direction = self.determine_direction(location)\n if next_direction:\n self.body.move(next_direction)", "def move(self, new_location):\n pass", "def moveStage(scope,params,instance) -> None:\n finder = instance.finders.first()\n stage_x, stage_y, stage_z = finder.stage_x, finder.stage_y, finder.stage_z\n scope.moveStage(stage_x,stage_y,stage_z)", "def moveNorth(self):\n pass", "def mov(self, dest: Any, src: Any) -> Any:\n ...", "def move(self, direction):\n delta_y, delta_x = direction.delta()\n width, height = self.size\n y, x = self.head\n x += delta_x\n y += delta_y\n if 0 <= y and y < height and 0 <= x and x < width:\n self.head = y, x\n\n # Save the undo operation in the changelog, for restoring the\n # board after returning from a Gobstones function call.\n self.changelog.append(('move', direction.opposite()))\n else:\n raise SelfDestructionException(i18n.i18n('Cannot move'))", "def make_move(self, discussion):\n pass", "def stay_put(self):\n self.go_to(self.pos.x,self.pos.y, self.pos.theta)", "def move_transfer(self, reactorid):\n pos = self.conf['Positions']['Reactor%d' % reactorid]['transfer']\n self.move_coord(*pos)", "def move_stage(self, i, j):\n self._stage.insert(j, self._stage.pop(i))", "def flow_arrangement_enforcer(self):\n blocks = self._topology.blocks\n log.debug(\"Enforcing Flow Arrangement\")\n\n maxBlockIdx = max([x for x in blocks])\n currentIdx = 0\n while currentIdx < maxBlockIdx:\n offsetIdx = 0\n #is the current block a destination? 
\n if not blocks[currentIdx].isFlowDest:\n #if it's not an origin, keep going.\n if not blocks[currentIdx].isFlowOrigin:\n pass\n #If it *is* an origin, what is its destination?\n else:\n destIdx = map(lambda x: x.dest.block.index, blocks[currentIdx].flowsGoingOut)\n if len(destIdx) > 1:\n pass\n #TODO\n else:\n destBlock = blocks[destIdx[0]]\n flowsGoingInToDestBlock = destBlock.flowsComingIn\n originsOfFlowsGoingInToDestBlock = map(lambda f: f.origin.block, flowsGoingInToDestBlock)\n for o in originsOfFlowsGoingInToDestBlock:\n #Don't move the one we're sitting on (or ones we've already processed)!\n if o.index > (currentIdx+offsetIdx):\n #Move each origin of the flows going into the dest block in front of it...\n offsetIdx += 1\n self.move_block(o.index, currentIdx+offsetIdx)\n #Double check that your dest block hasn't moved:\n offsetIdx += 1\n self.move_block(destBlock.index, currentIdx+offsetIdx)\n #If it *is* a destination, shunt it to the end and keep going.\n else:\n self.move_block(currentIdx, maxBlockIdx)\n currentIdx -= 1\n #Refresh current block indices\n blocks = self._topology.blocks\n currentIdx += (offsetIdx + 1)\n log.debug(\"Finished Enforcing Flow Arrangement\")\n blocks = self._topology.blocks", "def zone(self, zonename):\n self._zone = zonename\n self._send_command(\"zone %s\" % self._zone)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the selected engine for this recipe. This method will raise if there is no selected engine, whether it's because the present recipe type has no notion of engine, or because DSS couldn't find any viable engine for running the recipe.
def get_selected_engine_details(self): if not "selectedEngine" in self.data: raise ValueError("This recipe doesn't have a selected engine") return self.data["selectedEngine"]
[ "def engine(self) -> \"DatabaseInstanceEngine\":\n return self._values.get('engine')", "def _get_engine(self, context=None):\n context = context or self.context\n engine = self.engine_class(context=context, stores=self.stores)\n return engine", "def engine_version(self) -> typing.Optional[str]:\n return self._values.get('engine_version')", "def load_engine():\n if gouda:\n settings = current_settings()\n engine = settings['engine']\n if 'libdmtx' == engine:\n return LibDMTXEngine()\n elif 'zbar' == engine:\n return ZbarEngine()\n elif 'inlite' == engine:\n return InliteEngine(settings['inlite-format'])\n else:\n raise ValueError('Unrecognised barcode reader [{0}]'.format(engine))\n else:\n raise InselectError('Barcode decoding is not available')", "def execution_engine(self):\n return self._execution_engine", "def GetSupportedEngines():\r\n pass", "def get_storage_engine(self, cursor, table_name):\n cursor.execute(\n \"\"\"\n SELECT engine\n FROM information_schema.tables\n WHERE\n table_name = %s AND\n table_schema = DATABASE()\n \"\"\",\n [table_name],\n )\n result = cursor.fetchone()\n if not result:\n return self.connection.features._mysql_storage_engine\n return result[0]", "def load_engine():\n try:\n from gouda.engines import InliteEngine, LibDMTXEngine, ZbarEngine\n except ImportError:\n raise InselectError('Barcode decoding is not available')\n else:\n settings = current_settings()\n engine = settings['engine']\n if 'libdmtx' == engine:\n return LibDMTXEngine()\n elif 'zbar' == engine:\n return ZbarEngine()\n elif 'inlite' == engine:\n return InliteEngine(settings['inlite-format'])\n else:\n raise ValueError('Unrecognised barcode reader [{0}]'.format(engine))", "def interpreter(self) -> Optional[pulumi.Input['TemplateContentsInterpreter']]:\n return pulumi.get(self, \"interpreter\")", "def available_engines() -> Sequence[\"DiffEngine\"]:\n try:\n return tuple(getattr(DiffEngine, \"_available_engines\"))\n except AttributeError:\n result = []\n try:\n result.append(DiffEngine.create(name=\"native\"))\n except ImportError:\n pass\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=True))\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=False))\n result = tuple(result)\n setattr(DiffEngine, \"_available_engines\", result)\n return result", "def get():\n if GraphEngine.__engine is None:\n GraphEngine.__engine = GraphEngine()\n return GraphEngine.__engine", "def interpreter_requirement(self):\n return self._config.get(self._section, 'interpreter_requirement')", "def getEngine(content):", "def get_backend_engine(self, name, **kwargs):\n if name not in self._engines:\n msg = \"Given settings backend is unknowed: {}\"\n raise SettingsBackendError(msg.format(name))\n\n return self._engines[name](**kwargs)", "def _get_editor(self):\n if 'EDITOR' in os.environ:\n return os.environ['EDITOR']\n\n for editor in FALLBACK_EDITORS:\n if shutil.which(editor):\n return editor\n\n raise SpawnError('Could not find an editor')", "def get_engine() -> Engine:\n global SESSION_FACTORY\n if SESSION_FACTORY is None:\n raise ValueError(\"Engine must be initialized first.\") # pragma: no cover\n # pyre-fixme[16]: `Optional` has no attribute `bind`.\n return SESSION_FACTORY.bind", "def has_storage_engine(self, target):\n if len(target) == 0:\n return True # This says we will use default engine on the server.\n\n query_str = (\n \"SELECT UPPER(engine) as engine, UPPER(support) as support \"\n \"FROM INFORMATION_SCHEMA.ENGINES\"\n )\n\n if target:\n engines = 
self.exec_stmt(query_str)\n for engine in engines:\n if engine[0].upper() == target.upper() and \\\n engine[1].upper() in ['YES', 'DEFAULT']:\n return True\n return False", "async def get_engine_store(\n app_state: AppState = Depends(get_app_state),\n hardware_api: HardwareControlAPI = Depends(get_hardware),\n robot_type: RobotType = Depends(get_robot_type),\n deck_type: DeckType = Depends(get_deck_type),\n) -> EngineStore:\n engine_store = _engine_store_accessor.get_from(app_state)\n\n if engine_store is None:\n engine_store = EngineStore(\n hardware_api=hardware_api, robot_type=robot_type, deck_type=deck_type\n )\n _engine_store_accessor.set_on(app_state, engine_store)\n\n return engine_store", "def get_selected_model(self):\n return self.get_selected_configuration().model", "def _get_embedded_engine(db_engine, cp):\n ZDSLOG.debug(\"Getting embedded engine\")\n if db_engine == 'sqlite':\n db_name = cp.get('DEFAULT', 'zdstack_database_name', ':memory:')\n else:\n db_name = cp.get('DEFAULT', 'zdstack_database_name', False)\n if not db_name:\n es = \"Required global option zdstack_database_name not found\"\n raise ValueError(es)\n elif db_name == ':memory:':\n es = \":memory: is only valid when using the SQLite database engine\"\n raise ValueError(es)\n db_str = '%s://' % (db_engine)\n if db_name == ':memory:':\n db_str += '/:memory:'\n else:\n db_name = resolve_path(db_name) # just to be sure\n if not os.path.isfile(db_name):\n es = \"Embedded DB file %s not found, will create new DB\"\n ZDSLOG.info(es % (db_name))\n db_str += '/' + db_name\n if db_engine == 'sqlite':\n cd = {'check_same_thread': False, 'isolation_level': 'IMMEDIATE'}\n e = create_engine(db_str, poolclass=StaticPool, connect_args=cd)\n else:\n e = create_engine(db_str, poolclass=StaticPool)\n return e" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get details about all possible engines for this recipe. This method will raise if there is no engine, whether it's because the present recipe type has no notion of engine, or because DSS couldn't find any viable engine for running the recipe.
def get_engines_details(self): if not "engines" in self.data: raise ValueError("This recipe doesn't have engines") return self.data["engines"]
[ "def available_engines() -> Sequence[\"DiffEngine\"]:\n try:\n return tuple(getattr(DiffEngine, \"_available_engines\"))\n except AttributeError:\n result = []\n try:\n result.append(DiffEngine.create(name=\"native\"))\n except ImportError:\n pass\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=True))\n result.append(DiffEngine.create(name=\"plain\", hash_optimization=False))\n result = tuple(result)\n setattr(DiffEngine, \"_available_engines\", result)\n return result", "def GetSupportedEngines():\r\n pass", "async def available_engines():\n return AvailableEnginesResponse(__root__=sorted(search_engines.keys()))", "def scrape_engines(self) -> list:\r\n cars = self.__cars\r\n engines = []\r\n for car in cars:\r\n try:\r\n engine = (\r\n car.find(\"div\", class_=\"announcement-title\")\r\n .text.strip()\r\n .split(\",\")[1]\r\n .split()[0]\r\n )\r\n except:\r\n engine = None\r\n engines.append(engine)\r\n self.__engines = [*self.__engines, *engines]\r\n return engines", "def engineList(self, targets):\n if isinstance(targets, int):\n if targets not in self.engines.keys():\n log.msg(\"Engine with id %i is not registered\" % targets)\n raise error.InvalidEngineID(\"Engine with id %i is not registered\" % targets)\n else: \n return [self.engines[targets]]\n elif isinstance(targets, (list, tuple)):\n for id in targets:\n if id not in self.engines.keys():\n log.msg(\"Engine with id %r is not registered\" % id)\n raise error.InvalidEngineID(\"Engine with id %r is not registered\" % id) \n return map(self.engines.get, targets)\n elif targets == 'all':\n eList = self.engines.values()\n if len(eList) == 0:\n raise error.NoEnginesRegistered(\"There are no engines registered. \"\n \"Check the logs if you think there should have been.\")\n else:\n return eList\n else:\n raise error.InvalidEngineID(\"targets argument is not an int, list of ints or 'all': %r\"%targets)", "def _get_search_engines():\n global _engines\n if _engines:\n return _engines\n\n matomo_engines = _get_matomo_engines()\n # Engine names are the first param of each of the search engine arrays\n # so we group by those guys, and create our new dictionary with that\n # order\n _engines = {}\n\n for engine_name, rule_group in iteritems(matomo_engines):\n defaults = {\n 'extractor': None,\n 'link_macro': None,\n 'charsets': ['utf-8'],\n 'hiddenkeyword': None\n }\n\n for rule in rule_group:\n if any(url for url in rule['urls'] if '{}' in url):\n rule['urls'] = _expand_country_codes(rule['urls'])\n for i, domain in enumerate(rule['urls']):\n if i == 0:\n defaults['extractor'] = rule['params']\n if 'backlink' in rule:\n defaults['link_macro'] = rule['backlink']\n if 'charsets' in rule:\n defaults['charsets'] = rule['charsets']\n if 'hiddenkeyword' in rule:\n defaults['hiddenkeyword'] = rule['hiddenkeyword']\n\n _engines[domain] = SearchEngineParser(engine_name,\n defaults['extractor'],\n defaults['link_macro'],\n defaults['charsets'],\n defaults['hiddenkeyword'])\n\n return _engines", "def get_scoring_engine_list(self):", "def com_day_cq_wcm_scripting_bvp_script_engines(self) -> ConfigNodePropertyArray:\n return self._com_day_cq_wcm_scripting_bvp_script_engines", "def _get_available_engine_upgrades(client, major=False):\n results = {}\n paginator = client.get_paginator('describe_db_engine_versions')\n for page in paginator.paginate():\n engine_versions = page['DBEngineVersions']\n for v in engine_versions:\n if v['Engine'] not in results:\n results[v['Engine']] = {}\n if 'ValidUpgradeTarget' not in v or 
len(v['ValidUpgradeTarget']) == 0:\n continue\n for t in v['ValidUpgradeTarget']:\n if not major and t['IsMajorVersionUpgrade']:\n continue\n if LooseVersion(t['EngineVersion']) > LooseVersion(\n results[v['Engine']].get(v['EngineVersion'], '0.0.0')):\n results[v['Engine']][v['EngineVersion']] = t['EngineVersion']\n return results", "def engine_state(self) -> List[EngineState]:\n return self._engine_state", "def get_valid_engines(path_name):\n valid_engines = ['chakra', 'jsc', 'spidermonkey', 'v8']\n \n # get string after seeds/ pattern\n pattern = r\"(?<=seeds\\/).*$\"\n regex = re.compile(pattern, re.IGNORECASE)\n name = regex.search(path_name).group()\n \n # check if engine should not ran this file\n if name in SEED_INVALID['chakra']:\n valid_engines.remove('chakra')\n elif name in SEED_INVALID['spidermonkey']:\n valid_engines.remove('spidermonkey')\n elif name in SEED_INVALID['jsc']:\n valid_engines.remove('jsc')\n elif name in SEED_INVALID['v8']:\n valid_engines.remove('v8')\n\n return valid_engines", "def get_oem_inventory(self):\n for desc in self.get_oem_inventory_descriptions():\n yield (desc, self.get_inventory_of_component(desc))", "def get_engine_containers(self):\n cont_list = self.get_running_containers()\n ee_infos = {}\n for c in cont_list:\n cname = c[\"ee_info\"].get(\"name\", \"\")\n ee_infos.setdefault(cname, []).append(c)\n for eng_cont_list in ee_infos.values():\n eng_cont_list.sort(key=lambda ci: ci[\"ts_created\"])\n return ee_infos", "async def get_engine_store(\n app_state: AppState = Depends(get_app_state),\n hardware_api: HardwareControlAPI = Depends(get_hardware),\n robot_type: RobotType = Depends(get_robot_type),\n deck_type: DeckType = Depends(get_deck_type),\n) -> EngineStore:\n engine_store = _engine_store_accessor.get_from(app_state)\n\n if engine_store is None:\n engine_store = EngineStore(\n hardware_api=hardware_api, robot_type=robot_type, deck_type=deck_type\n )\n _engine_store_accessor.set_on(app_state, engine_store)\n\n return engine_store", "def engine(self) -> \"DatabaseInstanceEngine\":\n return self._values.get('engine')", "def getEVs():\n vehTypes = getAllVehicleTypes()\n return [veh for veh in vehTypes if veh['vehType'] in [ER, C_ER]]", "def get_supported_entities():\n return analyzer_engine().get_supported_entities()", "def have_engine(cnx, engine):\n have = False\n engine = engine.lower()\n\n cur = cnx.cursor()\n # Should use INFORMATION_SCHEMA, but play nice with v4.1\n cur.execute(\"SHOW ENGINES\")\n rows = cur.fetchall()\n for row in rows:\n if row[0].lower() == engine:\n if row[1].lower() == 'yes':\n have = True\n break\n\n cur.close()\n return have", "def getEngine(content):", "def clean_engines(self) -> None:\n self.engine_module = None\n self.aux_engine_module = None\n self.tokens_module = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save back the recipe in DSS.
def save(self): self._payload_to_str() return self.recipe.client._perform_json( "PUT", "/projects/%s/recipes/%s" % (self.recipe.project_key, self.recipe.recipe_name), body=self.data)
[ "def saveShelf():\n pass", "def SaveState(self):\n obj_file = open(self.Name + '.rlx', 'w')\n dump(self, obj_file)", "def save(self): \r\n dataPath, metaPath, zipPath = self.expPath() \r\n self.save_as(self.data, dataPath) \r\n self.save_as(self.metadata, metaPath)\r\n # zip(dataPath, metaPath, zipPath)\r\n # os.remove(dataPath)\r\n # os.remove(metaPath)\r\n return", "def save(self, path):", "def _save(self):\n\t\twith open(self.path, 'w') as f:\n\t\t\tf.write(self.source)", "def save(self):\n sym = self.symbol.get()\n if len(sym) != 1:\n print(sym)\n return\n sg = self.leds_as_string()\n if sg == '0'*8*8:\n return self.update_info(\"Empty symbol\")\n self.data[sym] = sg\n with open(savePath, 'wb') as font:\n pk.dump(self.data, font)", "def save(self, output, data):\r\n pass", "def dbSave(self, env):\n\t\traise NotImplementedError, 'Flat File Saving Not Implemented'", "def save_persist(self) -> None:\n self.sys_addons.data.save_data()", "def save(self):\n pickle.dump(self.keyValue, open(\"brain.dump\", \"w+\"))\n print \"Successfully saved file\"", "def save(self):\n s = load_stack(self)\n if not load_stack(self):\n save_stack(self)", "def save(self) -> None:\n with self.journal_path.open('wb') as journal_file:\n pickle.dump(self.store, journal_file, pickle.HIGHEST_PROTOCOL)", "def saveAllShelves():\n pass", "def save(self):\n self.vis.save([self.vis.env])", "def save_recipe():\n### FROM random_recipes_search.html \n\n recipe_info = literal_eval(request.args.get(\"recipe\"))\n (recipe_url, recipe_image, recipe_name, recipe_id) = recipe_info\n\n recipe_entry = Recipe.query.filter_by(recipe_id=recipe_id).first()\n\n # add entry to recipes table if recipe does not already exist\n if not recipe_entry:\n new_recipe_entry = Recipe(recipe_image=recipe_image, recipe_id=recipe_id,\n recipe_name=recipe_name, recipe_url=recipe_url)\n db.session.add(new_recipe_entry)\n db.session.commit()\n\n session['recipe_id'] = recipe_id\n\n # payload = get_movie_payload()\n # payload.update({'page': randint(1,50)})\n\n # response = requests.get(MOVIEDB_URL + \"discover/movie\", params=payload)\n # data = response.json()\n # movies = data['results']\n flash(\"Recipe successfully saved!\", 'alert-success')\n return redirect('/display_random_recipes')", "def save_state(self):\n self.save(get_config_path(self.uuid + \".state\"), state=True, compress=True)", "def save(self, path):\n path = os.path.abspath(path)\n if not os.path.exists(path):\n os.mkdir(path)\n self.save_depository(os.path.join(path, 'depository'))\n self.save_libraries(os.path.join(path, 'libraries'))\n self.save_groups(os.path.join(path, 'groups'))", "def _save(self):\n settings = self._settings\n self.data_format = settings.data_format\n self.byte_order = settings.byte_order\n self.restore = True", "def save(self, **kwargs):\n self.instance.unarchive()", "def _save_data(self):\n if not os.path.exists(self.folder):\n self._create_folder()\n\n for revision in self.revisions:\n revision.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the type of the recipe.
def type(self): return self.recipe_settings["type"]
[ "def read_type(self):\n return type_get_read_type(self)", "def get_type(self) -> str:\n return self.request_type", "def type(self):\n return self.kwargs.get(\"type\", str)", "def get_recipes_by_types(self, recipe_type):\n return self.recopies_list[recipe_type]", "def get_recipes_by_types(self, recipe_type):\n pass", "def term_type(self):\n return self._term_type", "def getTypeHelper(self):\n return getStepTypeHelper(self.data)", "def get_type(self):\n return self.level_type[0]", "def get_type(self) -> ModelType:\n pass", "def type_of(self, name):\n return self.find_symbol_by_name(name).get(\"type\")", "def type(self) -> type or TypeVar:\n return self._type", "def type(self):\r\n try:\r\n if self and self._typestring in self.resp.dict:\r\n return self.resp.dict[self._typestring]\r\n # Added for object type\r\n elif self and \"type\" in self.resp.dict:\r\n return self.resp.dict[\"type\"]\r\n except (AttributeError, ValueError, TypeError):\r\n return (\r\n self.deftype\r\n ) # data not yet fetched, probably empty dict, so assume deftype\r\n return None", "def entry_type(self):\n return self.__entry_type", "def type(self):\n return self.rt", "def shell_type(self):\n return get_kind(type(self))", "def get_label_type(data):\n return get_label_for(data, \"type: \")", "def get_object_type(self):\n object_type = self.request_data[\"object\"][\"type\"]\n self.logger.debug('get_object_type: %s' % object_type)\n return object_type", "def get_type(self, id):\n if id not in self._opts:\n return None\n\n return self._opts[id][1]", "def getDrinkType(self):\n return self.getName()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the recipe definition.
def get_recipe_raw_definition(self): return self.recipe_settings
[ "def recipe(self):\n if self.db.recipe:\n from world.dominion.models import CraftingRecipe\n try:\n recipe = CraftingRecipe.objects.get(id=self.db.recipe)\n return recipe\n except CraftingRecipe.DoesNotExist:\n pass", "def definition_file(self):\n return get_product_definition_file(self)", "def get_model_recipe(self, model_name: str) -> Optional[DictConfig]:\n recipe_file = self._get_file(model_name, AutoNACFileName.RECIPE_YAML)\n if recipe_file is None:\n return None\n\n config_name = os.path.basename(recipe_file)\n download_dir = os.path.dirname(recipe_file)\n\n return load_recipe(config_name=config_name, recipes_dir_path=download_dir)", "def id(self):\n return self.recipe_name", "def definition(self):\n return self._bound_context.get_function_def(self.name)", "def get_definition(self) -> LabwareDefinitionDict:\n return cast(LabwareDefinitionDict, self._definition.dict(exclude_none=True))", "def recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SoftwareRecipeArgs']]]]:\n return pulumi.get(self, \"recipes\")", "def __str__(self):\n return \"\"\"Recipe class containing info about name, cooking_lvl,\n ingredients, recipe_type and description\"\"\"\n return txt", "def task_definition(self) -> pulumi.Output['pulumi_aws.ecs.TaskDefinition']:\n return pulumi.get(self, \"task_definition\")", "def get_recipe(recipes: dict, raw_materials: dict, recipe_id: str) -> Recipe:\n\n recipe = recipes[recipe_id]\n return convert_recipe(raw_materials, recipe)", "def type(self):\n return self.recipe_settings[\"type\"]", "def create(self, recipe_name: str):\n recipe = self.recipes.get(recipe_name)\n if recipe is None:\n raise BuildException(f\"No recipe for {recipe_name}\")\n if not self.inventory.subtract(recipe.ingridients):\n raise BuildException(f\"No resources for recipe {recipe}\")\n return recipe.factory()", "def get_recipe_by_name(self, name):\n pass", "def set_definition_and_payload(self, definition):\n warnings.warn(\"Recipe.set_definition_and_payload is deprecated, please use get_settings\", DeprecationWarning)\n definition._payload_to_str()\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s\" % (self.project_key, self.recipe_name),\n body=definition.data)", "def word_definition(word):\n wdef_endpoint=\"/word/{0}/definitions?api_key={1}\".format(word,api_key)\n data = requester(wdef_endpoint)\n \n definition = list()\n \n if(data['status'] == 200): \n for i in data['data']:\n definition.append(i['text'])\n else: \n definition.append('No Definitions for the word'.format(word))\n \n return definition", "def parse_section(self, name):\n options = dict(self.parser.items(name))\n factory_string = self._get_string(name, 'recipe', DEFAULT_RECIPE)\n recipe = self.load_recipe(factory_string, name, options)\n requirements = self._get_list(name, 'requires')\n recipe.requirements = [self.parse_section(req) for req in requirements]\n parts = self._get_list(name, 'parts')\n recipe.parts = [self.parse_section(part) for part in parts]\n return recipe", "def recipes(self) -> pulumi.Output[Sequence['outputs.SoftwareRecipeResponse']]:\n return pulumi.get(self, \"recipes\")", "def optReadDefinition(*args):\n return _optcc.optReadDefinition(*args)", "def getDefinition(self, identifier):\n identifier = str(identifier).lower();\n\n if not self.hasDefinition(identifier):\n raise InvalidArgumentException(\n 'The service definition \"{0}\" does not exist.'\n ''.format(identifier)\n );\n\n return self.__definitions[identifier];", "def get_node_definition(self, node_display_name: str) -> 
str:\n node_label = self.get_node_label(node_display_name)\n\n if not node_label:\n return \"\"\n\n mm_graph = self.se.get_nx_schema()\n node_definition = mm_graph.nodes[node_label][\"comment\"]\n\n return node_definition" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }