query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (19 to 20 items)
metadata: dict
Counts how many rows belong to each class in a dataset.
def classCounts(rows):
    counts = {}
    for row in rows:
        # in our data set format, the label is always the last column
        label = row[-1]
        if label not in counts:
            counts[label] = 1
        else:
            counts[label] += 1
    return counts
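A quick usage check of the document above; the toy rows are illustrative, not taken from the dataset.

# Toy rows in the expected format: the class label sits in the last column.
rows = [
    ["green", 3, "apple"],
    ["yellow", 3, "apple"],
    ["red", 1, "grape"],
]
print(classCounts(rows))  # -> {'apple': 2, 'grape': 1}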
[ "def get_class_counts(data):\n y = data['class']\n num_pos = np.count_nonzero(y == 1)\n num_neg = np.count_nonzero(y == 0)\n return (num_neg, num_pos)", "def count(self, cls=None):\n all_classes = classes.values()\n if cls:\n counter = len(models.storage.all(cls).values())\n\n else:\n counter = 0\n for element in all_classes:\n counter += len(models.storage.all(element).values())\n\n return counter", "def __count(self, data, instance):\n cmp = (data == instance)\n cmp = [True if i.all() else False for i in cmp]\n num = np.sum(cmp)\n return num", "def do_count(self, arg):\n if not self.validate_len_args(arg):\n return\n class_name = self.validate_class_name(arg)\n if not class_name:\n return\n\n class_list = storage.filter_by_class(class_name)\n print(len(class_list))", "def get_num_classes(self, label_key=None):", "def number_of_labels_per_class(labels):\n number_samples = []\n n_classes = number_of_class(labels)\n for n in range (n_classes):\n number_samples.append(np.count_nonzero(labels == n))\n return number_samples", "def get_classes_counts(self):\n return self.classes_counts", "def count_data(self):\n num_data = 0\n for in_file_name in self.file_names:\n h5_file = h5py.File( in_file_name, 'r' )\n X = h5_file[self.features_name]\n if hasattr(X, 'keys'):\n num_data += len(X[ list(X.keys())[0] ])\n else:\n num_data += len(X)\n h5_file.close()\n return num_data", "def classes_count(self):\n return self._classes_count", "def count_data(self):\n num_data = 0\n for cur_file_name in self.file_names:\n cur_file_features, cur_file_labels = self.load_data(cur_file_name)\n num_data += self.get_num_samples( cur_file_features )\n return num_data", "def get_n_samples(csv, n_classes):\n df = pd.read_csv(csv)\n\n nums = [0 for i in range(n_classes)]\n for i in range(len(df)):\n cls_id = df.iloc[i]['cls_id']\n nums[cls_id] += 1\n\n return nums", "def getClassCounts(examples,className):\n counts_class = {}\n \n for e in examples:\n class_val = e[className]\n if class_val not in counts_class:\n counts_class[class_val] = 0\n counts_class[class_val] += 1\n\n return counts_class", "def get_class_count(self):\n for rec in self:\n count = self.env['assign.class'].search_count(\n [('professor_id', '=', rec.id)])\n rec.extra_class_count = count", "def count(self, dataset, **options):\n url = self.url + \"/\" + dataset\n response = self.session.head(url, params=options)\n count = response.headers.get(\"X-Query-Record-Count\")\n return int(count)", "def count_datasets(self, desc):\n datasets_count = 0\n for dataset in desc['dataset']:\n if len(dataset[\"P\"]) == len(dataset[\"Q\"]) and len(dataset[\"P\"]) == len(dataset[\"T\"]):\n datasets_count = datasets_count + len(dataset[\"P\"])\n else:\n raise ValueError('Dataset size does\\'t match.')\n return datasets_count", "def get_num_batches(self, dataset: Dataset) -> int:\n raise NotImplementedError", "def n_classes(self):\n return self.hypnogram.n_classes", "def class_counts(image_list, print_counts=False, superclass_map=None):\n if superclass_map is None:\n class_list = DEEP_FASHION_CLASSES\n else:\n class_list = superclass_map.values()\n count_dict = {key: 0 for key in class_list}\n for img in image_list:\n count_dict[class_from_filename(img, superclass_map)] += 1\n if print_counts:\n for cl, count in count_dict.items():\n print(cl, count)\n return count_dict", "def count_samples(ctx):\n print(\"loading data...\")\n images, labels = load_data(ctx.obj[\"data_folder\"], shuffle_seed=ctx.obj[\"seed\"])\n\n print(\"\")\n print(\"enumerated sample counts:\")\n for 
key, arr in list(zip(label_mapping, numpy.transpose(keras.utils.to_categorical(labels)))):\n print(f\" - {key}: {int(sum(arr))}\")\n print(\"total: \", len(images))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if a value is numeric.
def isNumeric(value):
    return isinstance(value, int) or isinstance(value, float)
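One wrinkle of the isinstance-based check above that the try/float variants among the negatives do not share: bool is a subclass of int in Python, so booleans count as numeric. A short illustrative session:

print(isNumeric(3))      # True
print(isNumeric(2.5))    # True
print(isNumeric("7"))    # False -- numeric-looking strings are rejected
print(isNumeric(True))   # True  -- bool subclasses int, so it slips through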
[ "def _is_numeric(v):\n try:\n float(v)\n return True\n except ValueError:\n return False", "def is_numeric(x) -> bool:\n try:\n x = float(x)\n return True\n except ValueError:\n return False", "def is_numeric(x):\n \n try:\n float(x)\n return True\n except ValueError:\n return False", "def _is_number(value):\n if isinstance(value, int) or isinstance(value, float):\n return True\n return False", "def is_numeric(self, f: Feature) -> bool:\n return self.features[id(f)].is_numeric()", "def isnumber(cls, value):\n if isinstance(value, (int, long, float)): # True if value is already a number\n return True\n try:\n float(value) # Test if this can be converted to a number\n return True\n except:\n return False", "def is_numeric(attribute):\n colType = attribute[1]\n return 'int' in colType or 'float' in colType", "def isSimpleNumeric(x):\n \n return ((type(x)==int)or(type(x)==float))", "def isnum(tok):\n try:\n float(tok)\n return True\n except:\n return False", "def _isnumeric(s):\n for c in s:\n if c in '0123456789-.':\n numeric = True\n else:\n return False\n return numeric", "def require_numeric(self):\n return self._require_numeric", "def _assert_type_numeric(self, name, val):\n self._assert_type(name, val, (int, long, float))", "def is_number(self):\n self.number = re.sub(r'[^\\d]', '', self.number)\n return self.number.isdigit()", "def _is_numeric(df, column):\n\n if str(df[column].dtypes) == 'int64' or \\\n str(df[column].dtypes) == 'float64':\n return True\n else:\n return False", "def isNumber(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"num\"\n else:\n return False", "def is_float_or_number(value: Any) -> bool:\r\n if isinstance(value, float):\r\n return True\r\n if is_number(value=value):\r\n return True\r\n return False", "def locale_type_is_numeric(self, locale_type):\n # This case fixes Ohio, because county is numeric but db field is text\n if (locale_type == self.primary_locale_type) and \\\n self.data['numeric_primary_locale']:\n return True\n\n locale_field = self.get_locale_field(locale_type)\n field_type = self.data['columns'][locale_field]\n if (\"int\" in field_type) or (field_type == \"float\") or \\\n (field_type == \"double\"):\n return True\n else:\n return False", "def __isNumeric(self, arr):\n try:\n return arr.dtype.kind in 'biufc'\n except AttributeError:\n return False", "def is_numeric_char(char):\n return char.isdigit() or char == \".\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Gini Impurity for a list of rows.
def gini(rows):
    counts = classCounts(rows)
    impurity = 1
    for label in counts:
        labelProbability = counts[label] / float(len(rows))
        impurity -= labelProbability ** 2
    return impurity
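Worked arithmetic for the document above (it relies on the classCounts function from the first record): with 3 of 5 rows labeled 'a' and 2 labeled 'b', impurity = 1 - (0.6^2 + 0.4^2) = 1 - 0.52 = 0.48. The rows below are made up for illustration.

rows = [["a"], ["a"], ["a"], ["b"], ["b"]]  # label-only rows
print(gini(rows))  # -> 0.48 (up to float rounding)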
[ "def findImpurity(self, rows):\n isEntropy = self.criterion == 'entropy'\n counts = class_counts(rows)\n impurity = 0 if isEntropy else 1\n #Gini = 1 - sum(pi**2)\n if isEntropy:\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl * math.log(prob_of_lbl, 2)\n else:\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n\n return impurity", "def gini_impurity(y):\n hist = np.bincount(y)\n n = np.sum(hist)\n gini_impurity = 1 - sum([(i/n)**2 for i in hist])\n return gini_impurity", "def gini(self, groups, labels):\n n = sum([len(group) for group in groups])\n gini_impurity = 0.0\n for group in groups:\n group_size = len(group)\n if group_size != 0:\n score = 0.0\n for label in labels:\n frac_positive = [data_point.class_ for data_point in group].count(\n label) / group_size # P(C) = positive / (positive + negative)\n score += frac_positive ** 2\n gini_impurity += (1 - score) * (group_size / n)\n return gini_impurity", "def gini(array):\n # based on bottom eq:\n # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg\n # from:\n # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n # All values are treated equally, arrays must be 1d:\n array = array.flatten()\n if np.amin(array) < 0:\n # Values cannot be negative:\n array -= np.amin(array)\n # Values cannot be 0:\n array = array + 0.0000001\n # Values must be sorted:\n array = np.sort(array)\n # Index per array element:\n index = np.arange(1,array.shape[0]+1)\n # Number of array elements:\n n = array.shape[0]\n # Gini coefficient:\n return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))", "def gini_impurity(self, y):\r\n\r\n #########################################################################\r\n # TODO: Implement Gini impurity #\r\n #########################################################################\r\n gini = None\r\n\r\n #########################################################################\r\n # END OF YOUR CODE #\r\n ######################################################################### \r\n return gini", "def gini(values: Tensor) -> Tensor:\n\n output_vector = []\n\n for vector in values:\n\n if torch.mean(vector) == 0:\n gini_index = 0\n else:\n mean_abs_difference = torch.mean(torch.abs(torch.Tensor(np.subtract.outer(vector, vector))))\n relative_mean_abs_difference = mean_abs_difference / torch.mean(vector)\n gini_index = 0.5 * relative_mean_abs_difference\n\n output_vector.append(gini_index)\n\n return torch.Tensor(output_vector).reshape(-1, 1)", "def calculate_gini(self,prod_ownership_new, prod_ownership_old, product_probs, binary = True):\n \n Gini = pd.DataFrame(columns = prod_ownership_new.columns) \n if binary:\n for i in range(0, len(prod_ownership_new.columns)):\n prod_probs = product_probs[:,i,:] \n \n # Get the households who did NOT have product in prev period\n n_i = len(prod_ownership_old[prod_ownership_old.iloc[:,i]==0]) \n select = (prod_ownership_old.iloc[:,i]==0)\n col = prod_ownership_new.columns[i]\n \n # Percentage of those households who now do own the product\n change = prod_ownership_new.loc[select,col] # todo check that this selects the right thing\n mu_i = (sum(change) / len(change))*100 # percentage that is 1\n \n # Get the sum of probabilities for >0 of the product\n prod_own = prod_probs[:,1:].sum(axis=1) \n \n # Ranked probabilities - \n # We want the person with the highest probability to get the lowest rank\n probranks = 
pd.DataFrame(prod_own).rank( ascending = False) #method = 'max'\n # NOW SELECT THE ONES THAT BELONG TO THE NON-OWNING GROUP\n probranks = probranks[select]\n \n sumrank = 0\n for k in range(0,len(probranks)): # we sum only over the select households?\n #sumrank += probranks.iloc[k,0] * prod_ownership_new.loc[k,col]\n sumrank += probranks.iloc[k,0] * change.reset_index(drop=True)[k]\n \n Gini_i = 1 + (1/n_i) - ( 2 / ( (n_i**2)*mu_i ) )*sumrank \n Gini.loc[0,col] = Gini_i\n \n else: # the prod ownerships should be numbers of products\n for i in range(0, len(prod_ownership_new.columns)):\n # get the different possible values of ownerships\n values = pd.Series(prod_ownership_old.iloc[:,i].unique()).sort_values()\n prod_probs = product_probs[:,i,:] # get probs for this segment\n \n for j in values: \n # Number of households who did NOT have this exact number of products\n n_i = len(prod_ownership_old[prod_ownership_old.iloc[:,i]!=j])\n select = (prod_ownership_old.iloc[:,i]!=j)\n col = prod_ownership_new.columns[i]\n \n # Make a dummy for # of products ownership in the new period\n ownership_new_dummy = pd.Series(np.zeros(len(prod_ownership_new)))\n ownership_new_dummy[prod_ownership_new.iloc[:,i] == j] = 1\n ownership_new_dummy = ownership_new_dummy[select]\n \n # Percentage of the selected households who now do own the product\n mu_i = (sum(ownership_new_dummy) / len(ownership_new_dummy))*100 # percentage that has changed\n #TODO does this need to be *100 ????\n \n # Get the sum of probabilities for exactly j of the product\n prod_own = prod_probs[:,int(j)]\n \n # Ranked probabilities - \n # We want the person with the highest probability to get the lowest rank\n probranks =pd.DataFrame(prod_own).rank(ascending = False) #method='max', \n # NOW SELECT THE ONES THAT BELONG TO THE NON-OWNING GROUP\n probranks = probranks[select]\n \n sumrank = 0\n for k in range(0,len(probranks)):\n sumrank += probranks.iloc[k,0] * ownership_new_dummy.iloc[k]\n \n Gini_i = 1 + (1/n_i) - ( 2 / ( (n_i**2)*mu_i ) )*sumrank \n \n Gini.loc[int(j),col] = Gini_i \n return Gini", "def attribute_impurity(self, classifier, attribute): # Calculates the gini index value for a particular attribute #{{{\n attribute_values_count = {} # Counts of the classification values\n attribute_values_set = set()\n attr_class_values_count = {} # Counts of the class values\n attr_class_values_set = set()\n forest = {}\n gini_summation = 0.0\n for datum in self.data: # From the values of the classifier, create a set\n attribute_values_set.add(datum.get(attribute))\n attr_class_values_set.add(datum.get(classifier))\n print \"Attribute value set\"\n print attribute_values_set\n print \"class Attribute value set\"\n print attr_class_values_set\n\n attribute_values_count = dict.fromkeys(attribute_values_set, 0) # From the set of classifier values, create a dictionary for counting them\n attr_class_values_count = dict.fromkeys(attr_class_values_set, 0) # From the set of classifier values, create a dictionary for counting them\n for datum in self.data:\n attribute_values_count[datum.get(attribute)] += 1\n attr_class_values_count[datum.get(classifier)] += 1\n print \"Attribute value counts\"\n print attribute_values_count\n print \"class Attribute value counts\"\n print attr_class_values_count\n print \"length of data\"\n print float(len(self.data))\n temp_gini_index = 0.0\n for key in attribute_values_count.keys():\n print \"Key\"\n print key\n print \"counts\"\n print attr_class_values_count.get(key)\n temp_gini_index += 
float(((float(attr_class_values_count.get(key)))/(float(len(self.data)))))**2.0\n gini_summation = temp_gini_index\n gini_index = 1-gini_summation\n print gini_index\n return gini_index\n # }}}", "def test_gini_coefficient(self):\n # From Catalano et al 2009, table 3, page 11 \n # Available here:\n #http://scholarcommons.usf.edu/cgi/viewcontent.cgi?article=1032&context=numeracy\n proportion_of_population = [0.1*i for i in range (11)]\n cumulative_portion_of_consumption =\\\n [0.000,\\\n 0.023,\\\n 0.060,\\\n 0.110,\\\n 0.175,\\\n 0.254,\\\n 0.345,\\\n 0.459,\\\n 0.588,\\\n 0.754,\\\n 1.000]\n points = zip(proportion_of_population,\\\n cumulative_portion_of_consumption)\n \n obs = gini_coefficient(points)\n exp = 0.346 \n self.assertFloatEqual(obs,exp,eps=1e-3)", "def gini(clusters_labels):\n import pandas as pd\n \n # Get frequencies from clusters_labels\n clusters_labels = pd.Series(clusters_labels)\n frequencies = clusters_labels.value_counts()\n\n # Mean absolute difference\n mad = frequencies.mad()\n\n # Mean frequency of clusters\n mean = frequencies.mean()\n\n # Gini coefficient\n gini_coeff = 0.5 * mad / mean\n\n return gini_coeff", "def giniIndex(classCounts):\n res = 1.0\n s = sum(classCounts)\n for cl in classCounts:\n quout = (float(cl) / s)\n neg = -pow(quout, 2)\n res += neg\n return res", "def __cal_gini(self, y):\n gini_index = 1\n y = list(y)\n for unique_val in set(y):\n p = y.count(unique_val) / len(y)\n gini_index -= p**2\n return gini_index", "def compute_transition_impurities_v1(self, nints=[]):\n if nints == []: nints = self.leaf_nints\n for nint in nints:\n leaf = self.node(nint)\n # Filter samples down to those whose successor is *not* in this leaf.\n last_indices = leaf.indices[np.nonzero(self.nint[self.n[leaf.indices]] != nint)]\n # Get the count for each next leaf.\n leaf.next_nints, leaf.next_nint_counts = np.unique(self.nint[self.n[last_indices]], return_counts=True)\n leaf.next_nints = list(leaf.next_nints)\n # Compute the Gini impurity of these counts.\n counts_sum = np.sum(leaf.next_nint_counts)\n leaf.transition_impurity = 1 - (np.sum(leaf.next_nint_counts**2) / counts_sum**2)\n leaf.transition_impurity_sum = leaf.transition_impurity * leaf.num_samples \n\n # Return the leaf integers, sorted by transition_impurity_sum.\n return sorted({nint:self.node(nint).transition_impurity_sum for nint in self.leaf_nints}.items(), key = lambda x: x[1])", "def compute_gini(y_true, y_predict): \n return roc_auc_score(y_true, y_predict) * 2 - 1", "def impurity_feature_importance(self):\n feature_importances = np.zeros(self.n_features)\n total_samples = self.n_samples[0]\n for node in range(len(self.impurities)):\n if self.is_leaf(node):\n continue \n spit_feature = self.split_features[node]\n impurity = self.impurities[node]\n n_samples = self.n_samples[node]\n # calculate score\n left, right = self.tree_.get_children(node)\n lhs_gini = self.impurities[left]\n rhs_gini = self.impurities[right]\n lhs_count = self.n_samples[left]\n rhs_count = self.n_samples[right]\n score = (lhs_gini * lhs_count + rhs_gini * rhs_count)/n_samples\n # feature_importances = (decrease in node impurity) * (probability of reaching node ~ proportion of samples)\n feature_importances[spit_feature] += (impurity-score) * (n_samples/total_samples)\n\n feature_importances = feature_importances/feature_importances.sum()\n return feature_importances", "def calculate_impurity_dict(df, impurity_measure):\n categories = np.unique(df.iloc[:, 1])\n impurity_dict = {}\n for cat_x in categories:\n # calculate 
weighted sum of impurity for each column\n impurity = 0\n if impurity_measure == 'gini':\n impurity = 1\n for cat_y in np.unique(df.iloc[:, 0]):\n col_name = df.columns.values[1]\n split = df.loc[df[col_name] == cat_x]\n count = split.iloc[:, 0].tolist().count(cat_y) # count of each category in y\n total = len(split)\n if count is not 0 and total is not 0:\n if impurity_measure == 'gini':\n intermediate = intermediate_gini(count, total)\n else:\n intermediate = intermediate_entropy(count, total)\n impurity -= intermediate\n\n impurity_dict[cat_x] = impurity\n\n return impurity_dict", "def compute_transition_impurities_v2(self, nints=[]):\n if nints == []: nints = self.leaf_nints\n for nint in nints:\n leaf = self.node(nint)\n # Recompute next_nints for all samples in this leaf.\n first_indices = leaf.indices[np.nonzero(self.nint[self.p[leaf.indices]] != nint)] \n for index in first_indices:\n next_nint, sequence = self.get_next_nint(index)\n self.next_nint[sequence] = next_nint\n # Get the count for each next leaf.\n leaf.next_nints, leaf.next_nint_counts = np.unique(self.next_nint[leaf.indices], return_counts=True)\n leaf.next_nints = list(leaf.next_nints)\n # Compute the Gini impurity of these counts.\n leaf.transition_impurity_sum = leaf.num_samples - (np.sum(leaf.next_nint_counts**2) / leaf.num_samples)\n leaf.transition_impurity = leaf.transition_impurity_sum / leaf.num_samples \n\n # Return the leaf integers, sorted by transition_impurity_sum.\n return sorted({nint:self.node(nint).transition_impurity_sum for nint in self.leaf_nints}.items(), key = lambda x: x[1])", "def get_gain(row):\n gap = row.price_y - row.price_x\n if not row.buying:\n gap = - gap\n return gap * row.quantity", "def compute_transition_impurities_v3(self, nints=[]):\n if nints == []: nints = self.leaf_nints\n for nint in nints:\n leaf = self.node(nint)\n # Filter samples down to those whose successor is *not* in this leaf.\n last_indices = leaf.indices[np.nonzero(self.nint[self.n[leaf.indices]] != nint)]\n # Get the count for each next leaf.\n leaf.next_nints, leaf.next_nint_counts = np.unique(self.nint[self.n[last_indices]], return_counts=True)\n leaf.next_nints = list(leaf.next_nints)\n # Compute the Gini impurity of these counts.\n counts_sum = np.sum(leaf.next_nint_counts)\n leaf.transition_impurity = 1 - (np.sum(leaf.next_nint_counts**2) / counts_sum**2)\n leaf.transition_impurity_sum = leaf.transition_impurity * leaf.num_samples \n\n # Return the leaf integers, sorted by transition_impurity_sum.\n return sorted({nint:self.node(nint).transition_impurity_sum for nint in self.leaf_nints}.items(), key = lambda x: x[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of (article_id, prediction, label) tuples. Predicts a TensorFlow dataset in batches.
def batch_predict(tf_ds, batch_size, prediction_func):
    evaluation_data = []
    for aids, inps, lbls in tf_ds.batch(batch_size).as_numpy_iterator():
        ps = prediction_func(inps)
        evaluation_data += zip(aids, ps, lbls)
    return evaluation_data
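A minimal sketch of driving the function above with an in-memory tf.data.Dataset; the ids, features, threshold model, and expected output are all assumptions for illustration.

import numpy as np
import tensorflow as tf

# Three (article_id, input, label) records, purely illustrative.
tf_ds = tf.data.Dataset.from_tensor_slices(
    ([101, 102, 103],          # article ids
     [[0.1], [0.7], [0.4]],    # one-feature model inputs
     [0, 1, 0])                # labels
)

def predict(inps):
    # Stand-in model: class 1 whenever the single feature exceeds 0.5.
    return (inps[:, 0] > 0.5).astype(np.int64)

print(batch_predict(tf_ds, 2, predict))  # e.g. [(101, 0, 0), (102, 1, 1), (103, 0, 0)]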
[ "def get_train_data(self, batch_size):\n train = self._get_pickle('train')\n batch_a, batch_l = [], []\n questions, answers, labels = [], [], []\n\n for item in train:\n for answer_id in item['answers']:\n batch_a.append(self._get_answer(answer_id))\n batch_l.append(1)\n if len(batch_a) > batch_size:\n print(item['answers'])\n raise ValueError(\n \"The number of correct answers: %d is bigger than the batch_size: %d\"\n \"Consider increasing the batch_size\" % (len(batch_a), batch_size)\n )\n while(len(batch_a) < batch_size):\n batch_a.append(self._get_pool_answer(item['answers']))\n batch_l.append(0)\n\n questions.append(self._translate_sent(item['question']))\n answers.append(batch_a)\n labels.append(batch_l)\n\n batch_a, batch_l = [], []\n\n return questions, answers, labels", "def predict_from_batch(self, batch: pd.DataFrame):\n pass", "def get_preds_and_labels(model, generator):\r\n preds = []\r\n labels = []\r\n for _ in range(int(np.ceil(generator.samples / BATCH_SIZE))):\r\n x, y = next(generator)\r\n preds.append(model.predict(x))\r\n labels.append(y)\r\n # Flatten list of numpy arrays\r\n return np.concatenate(preds).ravel(), np.concatenate(labels).ravel()", "def knn_predict(\n self, train_ids: List[Id], labels: List[int], k: int,\n min_thresh=0., max_thresh=1., sample=1, ids=None\n ) -> List[Tuple[Id, float]]:\n assert max(labels) <= 1\n assert min(labels) >= 0\n return self._data.knn_predict(\n train_ids, labels, k, min_thresh, max_thresh, sample,\n [] if ids is None else ids)", "def predict_batches(self, list_batches):\n widgets = [progressbar.Percentage(),\n ' ', progressbar.Bar(),\n ' ', progressbar.ETA()]\n results_batches = np.array([]) # 3 zeros because is the output of the cnn 3 classes\n bar = progressbar.ProgressBar(widgets=widgets, maxval=len(list_batches)-1)\n bar.start()\n for item, batch in enumerate(list_batches):\n result = self.predict_batch(batch)\n if item == 0:\n results_batches = result.copy()\n else:\n results_batches = np.vstack((results_batches, result))\n bar.update(item)\n bar.update(len(list_batches)-1)\n return results_batches", "def process_batch(self, ids):\n datas = self.db.get_row_by_ids(ids)\n predicted_results = []\n for unlabeled_data in datas:\n predict = self.interpreter.parse(unlabeled_data[\"text\"])\n if predict:\n unlabeled_data.update(predict)\n predicted_results.append(unlabeled_data)\n return predicted_results", "def predict_dataloader(self, ds):\n data_loader = torch.utils.data.DataLoader(ds,\n batch_size=self.config[\"batch_size\"],\n shuffle=False,\n num_workers=self.config[\"workers\"])\n\n return data_loader", "def dataset(config, batch_size, split) -> Tuple[tf.data.Dataset, int]:\n root_dir, dataset_name = config[\"root_dir\"], config['dataset_name']\n dataset_dir = os.path.join(root_dir, 'datasets', dataset_name)\n\n INPUT = Path(dataset_dir)\n IMAGES = {'train': INPUT / 'train-images-idx3-ubyte',\n 'val': INPUT / 't10k-images-idx3-ubyte'}\n LABELS = {'train': INPUT / 'train-labels-idx1-ubyte',\n 'val': INPUT / 't10k-labels-idx1-ubyte'}\n\n images = read_mnist_images(IMAGES, split)\n labels = read_mnist_labels(LABELS, split)\n #random = np.random.RandomState(SEED)\n\n def gen():\n for image, label in zip(images, labels):\n yield image, label\n\n ds = tf.data.Dataset.from_generator(\n gen, (tf.uint8, tf.uint8), ((28, 28, 1), (1,)))\n\n if split == 'train':\n ds = ds.shuffle(512, seed=np.random.randint(0, 1024)).repeat()\n ds = ds.batch(batch_size).map(transform_train, num_parallel_calls=4)\n ds = ds.prefetch(2)\n return ds, 
len(labels)\n elif split == 'val':\n ds = ds.batch(batch_size).map(transform_val, num_parallel_calls=4)\n ds = ds.prefetch(2)\n return ds, len(labels)", "def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):\n tensor_datasets = []\n for dataset in encoded_datasets:\n n_batch = len(dataset[0])\n\n input_ids = np.zeros((n_batch, 1, input_len), dtype=np.int64)\n mc_token_ids = np.zeros((n_batch, 1), dtype=np.int64)\n lm_labels = np.full((n_batch, 1, input_len), fill_value=-1, dtype=np.int64)\n mc_labels = np.zeros((n_batch,), dtype=np.int64)\n \n for i, (story, mc_label) in enumerate(zip(*dataset)):\n with_cont1 = [start_token] + story[:cap_length] + [clf_token]\n input_ids[i, 0, :len(with_cont1)] = with_cont1\n \n mc_token_ids[i, 0] = len(with_cont1) - 1\n\n lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]\n\n mc_labels[i] = mc_label\n all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)\n tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))\n return tensor_datasets", "def predict_by_batch_via_pnn(tuple_batches_float32, sess, predictor, batch_size):\n nb_predictions = tuple_batches_float32[0].shape[0]\n nb_batches = tls.divide_ints_check_divisible(nb_predictions,\n batch_size)\n \n # If PNN is fully-connected, the width of the target patch\n # is retrieved from the size of its flattened masked context.\n # If PNN is convolutional, the width of the target patch is\n # equal to the height of the masked context portion located\n # above the target patch.\n if predictor.is_fully_connected:\n width_float = numpy.sqrt(float(tuple_batches_float32[0].shape[1])/5.).item()\n if not width_float.is_integer():\n raise ValueError('`numpy.sqrt(float(tuple_batches_float32[0].shape[1])/5.)` is not a whole number.')\n width_target = int(width_float)\n else:\n width_target = tuple_batches_float32[0].shape[1]\n predictions_float32 = numpy.zeros((nb_predictions, width_target, width_target, 1),\n dtype=numpy.float32)\n for i in range(nb_batches):\n if predictor.is_fully_connected:\n feed_dict={\n predictor.node_flattened_contexts_float32:tuple_batches_float32[0][i*batch_size:(i + 1)*batch_size, :]\n }\n else:\n feed_dict={\n predictor.node_portions_above_float32:tuple_batches_float32[0][i*batch_size:(i + 1)*batch_size, :, :, :],\n predictor.node_portions_left_float32:tuple_batches_float32[1][i*batch_size:(i + 1)*batch_size, :, :, :]\n }\n predictions_float32[i*batch_size:(i + 1)*batch_size, :, :, :] = sess.run(\n predictor.node_predictions_float32,\n feed_dict=feed_dict\n )\n return predictions_float32", "def _reshape_pred(pred: List[np.ndarray]) -> np.ndarray:\n pred_with_id = []\n for id_batch in range(pred.shape[0]):\n pred_single = pred[id_batch]\n local_ids = np.repeat([id_batch], pred_single.shape[0], axis=None)\n local_ids = np.expand_dims(local_ids, axis=-1)\n pred_single = np.concatenate([local_ids, pred_single], axis=1)\n pred_with_id.append(pred_single[pred_single[:, -1] > 0, :-1])\n pred_with_id = np.concatenate(pred_with_id, axis=0)\n return pred_with_id", "def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:\n pass", "def create_batches(self):\n random.shuffle(self.training_data_set) \n batches = [self.training_data_set[graph:graph+self.args.batch_size] for graph in range(0, len(self.training_data_set), self.args.batch_size)]\n return batches", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield 
features[start:end], labels[start:end]", "def get_soft_scores_and_true_labels(dataset, model):\n test_dataloader = DataLoader(dataset,32,shuffle=True)\n model = model.to(device=device)\n all_first_soft_scores = []\n all_second_soft_scores = []\n gt_labels = []\n for batch_idx, (inputs, targets) in enumerate(test_dataloader):\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n with torch.no_grad():\n scores = model(inputs)\n all_first_soft_scores = all_first_soft_scores + scores[:,0].tolist()\n all_second_soft_scores = all_second_soft_scores + scores[:, 1].tolist()\n gt_labels = gt_labels + targets.tolist()\n\n return all_first_soft_scores, all_second_soft_scores, gt_labels", "def predict(self, dataset):\n\n if self.seed is not None:\n np.random.seed(self.seed)\n\n num_test_samples, _ = np.shape(dataset.features)\n\n samples_covered = 0\n pred_labels = []\n while samples_covered < num_test_samples:\n start = samples_covered\n end = samples_covered + self.batch_size\n if end > num_test_samples:\n end = num_test_samples\n batch_ids = np.arange(start, end)\n batch_features = dataset.features[batch_ids]\n batch_labels = np.reshape(dataset.labels[batch_ids], [-1,1])\n batch_protected_attributes = np.reshape(dataset.protected_attributes[batch_ids][:,\n dataset.protected_attribute_names.index(self.protected_attribute_name)], [-1,1])\n\n batch_feed_dict = {self.features_ph: batch_features,\n self.true_labels_ph: batch_labels,\n self.protected_attributes_ph: batch_protected_attributes,\n self.keep_prob: 1.0}\n\n pred_labels += self.sess.run(self.pred_labels, feed_dict=batch_feed_dict)[:,0].tolist()\n samples_covered += len(batch_features)\n\n # Mutated, fairer dataset with new labels\n dataset_new = dataset.copy(deepcopy = True)\n dataset_new.scores = np.array(pred_labels, dtype=np.float64).reshape(-1, 1)\n dataset_new.labels = (np.array(pred_labels)>0.5).astype(np.float64).reshape(-1,1)\n\n\n # Map the dataset labels to back to their original values.\n temp_labels = dataset_new.labels.copy()\n\n temp_labels[(dataset_new.labels == 1.0).ravel(), 0] = dataset.favorable_label\n temp_labels[(dataset_new.labels == 0.0).ravel(), 0] = dataset.unfavorable_label\n\n dataset_new.labels = temp_labels.copy()\n\n return dataset_new", "def extract_dataset_features(classes_imgs, labels, config):\n all_features = []\n all_labels = []\n for i, class_imgs in enumerate(classes_imgs):\n class_features = []\n class_labels = np.repeat(labels[i], len(class_imgs))\n all_labels = class_labels if len(all_labels) == 0 else np.concatenate((all_labels, class_labels))\n for class_img_path in class_imgs:\n img = load_image(class_img_path)\n img_features = compute_features(img, color_space=config.color_space, \n hog_orient=config.hog_orientations, \n hog_pix_per_cell=config.hog_pixels_per_cell,\n hog_cells_per_block=config.hog_cells_per_block,\n hog_color_channels=config.hog_color_channels)\n class_features.append(img_features)\n\n all_features.append(class_features)\n \n normed_features, normaliser = normalise_features(all_features) \n return normed_features, all_labels, normaliser", "def _batch_inference(self, batched_inputs):\n outputs = []\n inputs = []\n for idx, input in zip(count(), batched_inputs):\n inputs.append(input)\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n outputs.extend(self.model.inference(inputs, do_postprocess=False))\n inputs = []\n return outputs", "def get_train_data_and_label(self) -> typing.Tuple[pd.DataFrame, pd.Series]:\n return 
self._extract_data_and_label(self.train_filters)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test new and current version functions.
def test_versions(self):
    self.assertEqual(Project.objects.current_version("test3"), 2)
    self.assertEqual(Project.objects.next_version("test3"), 3)
    self.assertEqual(Project.objects.current_version("dne"), 0)
    self.assertEqual(Project.objects.next_version("dne"), 1)
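The Project.objects manager exercised above is project-specific code; purely as a sketch, a custom Django manager with this contract could look like the following (the model name, fields, and lookup are assumptions).

from django.db import models

class ProjectManager(models.Manager):
    def current_version(self, name):
        # Highest stored version for the named project; 0 when none exist.
        latest = self.filter(name=name).aggregate(models.Max("version"))["version__max"]
        return latest or 0

    def next_version(self, name):
        return self.current_version(name) + 1

class Project(models.Model):
    name = models.CharField(max_length=100)
    version = models.PositiveIntegerField()
    objects = ProjectManager()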
[ "def test_versions(self):\n versions = self._project.versions()\n self.assertTrue(\"0.1\" in versions)", "def test_version_matches_expected():\n assert __version__ == \"0.1.0\"", "def test_show_version(self):\n version = 'Iteration ' + __init__.__version__\n self.assertEqual(news_functions.show_version(), version)", "def test_current_version(self):\n self.assertEquals(version.current_version(),\n sys.version_info)\n self.assertEquals(version.current_version_string(),\n \"{0}.{1}.{2}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro))", "def test_edit_both_versions(self):\n self.instance.package = self.input_ovf\n self.instance.version = \"5.2.0.01I\"\n self.instance.full_version = \"Cisco IOS XRv, Version 5.2\"\n self.instance.run()\n self.instance.finished()\n self.check_diff(\"\"\"\n <ovf:Vendor>Cisco Systems, Inc.</ovf:Vendor>\n- <ovf:Version>DEV</ovf:Version>\n- <ovf:FullVersion>DEVELOPMENT IMAGE</ovf:FullVersion>\n+ <ovf:Version>5.2.0.01I</ovf:Version>\n+ <ovf:FullVersion>Cisco IOS XRv, Version 5.2</ovf:FullVersion>\n <ovf:ProductUrl>http://www.cisco.com/en/US/products/ps12559/index.html\\\n</ovf:ProductUrl>\n\"\"\")", "def test_version_add_ok(self):\n self.execute('version add 9.9 \"%s\"' % self._test_date)\n rv, output = self.execute('version list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def test_model_version():\n\n assert model_version() == '0.19.0'", "def test_get_version(self):\n v7_version = utils.get_version(self.v7_sff_file)\n self.assertEqual(v7_version, '0.7.0.dev0')\n v8_version = utils.get_version(self.v8_sff_file)\n self.assertEqual(v8_version, '0.8.0.dev1')", "def test_version():\n instance = solvate_step.Solvate()\n result = instance.version\n assert isinstance(result, str) and len(result) > 0", "def testGetVersion(self):\n pvi = ProjectVersionInfo()\n vers = pvi.getVersion()\n self.assertNotEqual(vers, \"unknown\")", "def test_versioningAndPublish_andStop(self):\n doc = self._createDocument('dummy','title','HeaderFooter')\n doc.manage_saveSlot('body','Old content')\n # start versioning\n doc.manage_startVersioning()\n # manage_startVersioning() does two things, it sets\n # in_versioning to True and calls _initVersioningTexts()\n assert doc.inVersioning()\n \n # make a change whilst in versioning\n doc.manage_saveSlot('body', 'New content')\n \n # test_versioning() already checks if the seperation is made\n # Now, when we publish these changes we should expect that be able\n # to view New content even if viewing the page anonymously.\n doc.manage_publishVersioning(versioning_off=True)\n \n # there shouldn't be a index_html_versioning template\n assert not hasattr(doc, 'index_html_versioning')\n \n # count the number of revisions which should now have gone up one\n assert doc.countRevisionTimestamps('body') == 3, \\\n doc.countRevisionTimestamps('body')\n \n # viewing it as anonymous should return a page with the 'New content'\n self.logout()\n viewed = doc.view(self.app.REQUEST)\n assert viewed.find('New content') > -1\n assert viewed.find('Old content') == -1", "async def test_version(self):\n iq = await self.clients[1]['xep_0092'].get_version(\n self.clients[0].boundjid.full\n )\n version = iq['software_version']\n self.assertEqual(version['name'], 'Slix Test')\n self.assertEqual(version['version'], '1.2.3.4')\n self.assertEqual(version['os'], 'I use arch btw')", "def test_version(self):\n response = self.client.get('/internal/version')\n self.assert200(response)\n self.assertIn('version', 
response.json)", "def test_version_compare(fb, fb_secure):\n\n assert fb.version_compare(\"2.0\", \"1.0\") == 1\n assert fb.version_compare(\"1.0\", \"2.0\") == -1\n assert fb.version_compare(\"1.0\", \"1.0\") == 0\n assert fb.version_compare(\"2.2\", \"2.1\") == 1\n assert fb.version_compare(\"2.1\", \"2.2\") == -1\n assert fb.version_compare(\"2.1\", \"2.1\") == 0", "def test_version_time_ok(self):\n self.execute('version time 2.0 \"%s\"' % self._test_date)\n rv, output = self.execute('version list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def test_get_static_version(self):\n self.assertEqual(get_static_version(), '0.0.1')", "def test_full_update_system(self):\n pass", "def Xtest_versioningAndPublish_andContinue(self):\n doc = self._createDocument('dummy','title','HeaderFooter')\n doc.manage_saveSlot('body','Old content')\n # start versioning\n doc.manage_startVersioning()\n # manage_startVersioning() does two things, it sets\n # in_versioning to True and calls _initVersioningTexts()\n assert doc.inVersioning()\n \n # make a change whilst in versioning\n doc.manage_saveSlot('body', 'New content')\n \n # test_versioning() already checks if the seperation is made\n # Now, when we publish these changes we should expect that be able\n # to view New content even if viewing the page anonymously.\n doc.manage_publishVersioning()\n \n # there shouldn't be a index_html_versioning template\n assert hasattr(doc, 'index_html_versioning')\n \n # count the number of revisions which should now have gone up one\n # (nb: this is of the versioning version)\n assert doc.countRevisionTimestamps('body') == 2, \\\n doc.countRevisionTimestamps('body')\n \n # viewing it as anonymous should return a page with the 'New content'\n self.logout()\n viewed = doc.view(self.app.REQUEST)\n assert viewed.find('New content') > -1\n assert viewed.find('Old content') == -1", "def test_get_version(self):\n pid = 1\n project = model.Project.get(self.session, pid)\n exp = '2.39.0'\n obs = backend.NpmjsBackend.get_version(project)\n self.assertEqual(obs, exp)\n\n pid = 2\n project = model.Project.get(self.session, pid)\n self.assertRaises(\n AnityaPluginException,\n backend.NpmjsBackend.get_version,\n project\n )\n\n pid = 3\n project = model.Project.get(self.session, pid)\n exp = '0.6.2'\n obs = backend.NpmjsBackend.get_version(project)\n self.assertEqual(obs, exp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an integer, being x rounded towards positive infinity.
def ceil(x) -> int: pass
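The document above is only a stub, as is typical of MicroPython-style API docs. A hypothetical pure-Python sketch of the documented behavior, assuming a finite numeric input:

def ceil_sketch(x) -> int:
    # int() truncates toward zero; bump up when a positive fraction was dropped.
    t = int(x)
    return t + 1 if x > t else t

assert ceil_sketch(2.1) == 3 and ceil_sketch(-2.1) == -2 and ceil_sketch(4.0) == 4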
[ "def floor(x) -> int:\n pass", "def floor(x):\n return 0.0", "def iround(x):\n return int(round(x))", "def int_of_x(x):\n try:\n return int(x)\n\n except Exeption:\n return -1", "def floor(n: float) -> int:\n return (int(n//1))", "def ifloor(x):\n\n return np.floor(x).astype(int)", "def _int(v):\n try:\n return int(v)\n except Exception:\n return float(\"nan\")", "def truncate(x):\n return max(min(x, 1.0), 0.0)", "def intround(n):\r\n return int(round(n))", "def roundout(x):\n \n return math.trunc(x + math.copysign(.5, x))", "def nextRoundNumber(x):\n\n #guess to nearest order of magnitude\n if x in (0, 1):\n return x\n\n if x < 0:\n return -1.0 * nextRoundNumber(-x)\n else:\n lg = int(log10(x))\n\n if lg == 0:\n if x < 1:\n base = 0.1\n else:\n base = 1.0\n elif lg < 0:\n base = 10.0 ** (lg - 1)\n else:\n base = 10.0 ** lg # e.g. base(153) = 100\n # base will always be lower than x\n\n if base >= x:\n return base * 1.0\n elif (base * 2) >= x:\n return base * 2.0\n elif (base * 5) >= x:\n return base * 5.0\n else:\n return base * 10.0", "def roundrnd(x: float) -> float:\n return int(x) + int(_random.random() > (1 - (x % 1)))", "def inf(self):\r\n\t\treturn float('inf')", "def get_int(x):\n return int(x, 2)", "def signinf(e, x):\n from ..functions import sign\n\n if not e.has(x):\n return sign(e).simplify()\n if e == x:\n return Integer(1)\n if e.is_Mul:\n a, b = e.as_two_terms()\n return signinf(a, x)*signinf(b, x)\n if e.is_Pow and signinf(e.base, x) == 1:\n return Integer(1)\n\n c0, _ = leadterm(e, x)\n return signinf(c0, x)", "def integer_squareroot(value: int) -> int:\n if not isinstance(value, int) or isinstance(value, bool):\n raise ValueError(\n f\"Value must be an integer: Got: {type(value)}\"\n )\n if value < 0:\n raise ValueError(\n f\"Value cannot be negative: Got: {value}\"\n )\n\n with decimal.localcontext() as ctx:\n ctx.prec = 128\n return int(decimal.Decimal(value).sqrt())", "def ndigits(x):\n if abs(x) == 0:\n return 0\n else:\n return 1 + ndigits(abs(x) / 10)", "def int_or_none(x, limit):\n try:\n value = int(x)\n if 1 <= value <= limit:\n return value\n else:\n return None\n except ValueError:\n return None", "def map_x_in(self, x, clamp=False):\n x = int(x) # Fixme: why int?\n if clamp:\n if x <= self.inf:\n return 0\n elif x >= self.sup:\n x = self.sup\n return x - self.inf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an integer, being x rounded towards negative infinity.
def floor(x) -> int: pass
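The mirror of the previous stub; note that floor(x) == -ceil(-x) for finite x. Again a hypothetical sketch, not the library's implementation:

def floor_sketch(x) -> int:
    # int() truncates toward zero; step down when a negative fraction was dropped.
    t = int(x)
    return t - 1 if x < t else t

assert floor_sketch(2.9) == 2 and floor_sketch(-2.1) == -3 and floor_sketch(-3.0) == -3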
[ "def ceil(x) -> int:\n pass", "def int_of_x(x):\n try:\n return int(x)\n\n except Exeption:\n return -1", "def iround(x):\n return int(round(x))", "def floor(x):\n return 0.0", "def floor(n: float) -> int:\n return (int(n//1))", "def ifloor(x):\n\n return np.floor(x).astype(int)", "def _int(v):\n try:\n return int(v)\n except Exception:\n return float(\"nan\")", "def get_int(x):\n return int(x, 2)", "def nextRoundNumber(x):\n\n #guess to nearest order of magnitude\n if x in (0, 1):\n return x\n\n if x < 0:\n return -1.0 * nextRoundNumber(-x)\n else:\n lg = int(log10(x))\n\n if lg == 0:\n if x < 1:\n base = 0.1\n else:\n base = 1.0\n elif lg < 0:\n base = 10.0 ** (lg - 1)\n else:\n base = 10.0 ** lg # e.g. base(153) = 100\n # base will always be lower than x\n\n if base >= x:\n return base * 1.0\n elif (base * 2) >= x:\n return base * 2.0\n elif (base * 5) >= x:\n return base * 5.0\n else:\n return base * 10.0", "def intround(n):\r\n return int(round(n))", "def truncate(x):\n return max(min(x, 1.0), 0.0)", "def roundout(x):\n \n return math.trunc(x + math.copysign(.5, x))", "def magnitude(x: float) -> int:\n\n\tif x > 0.0:\n\t\treturn int(log10(x))\n\telif x < 0.0:\n\t\treturn int(log10(abs(x)))\n\telse:\n\t\treturn 0", "def signe(x):\n if x > 0 : return 1\n elif x < 0 : return -1\n else : return 0", "def _decimal_place(x):\n if x == 0:\n digits = 0\n else:\n digits = -int(np.log10(abs(x)) // 1)\n return digits", "def ceil_divide(value, arg):\n try:\n value = int(value)\n arg = int(arg)\n if arg:\n return -(-value // arg)\n except:\n pass\n return ''", "def roundrnd(x: float) -> float:\n return int(x) + int(_random.random() > (1 - (x % 1)))", "def planck_int(x):\n return _planck_int(x)", "def map_x_in(self, x, clamp=False):\n x = int(x) # Fixme: why int?\n if clamp:\n if x <= self.inf:\n return 0\n elif x >= self.sup:\n x = self.sup\n return x - self.inf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if x is finite.
def isfinite(x) -> bool: pass
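Another stub; since NaN and the two infinities are the only non-finite floats, the check can be written without imports. A dependency-free sketch, illustrative rather than the library source:

def isfinite_sketch(x) -> bool:
    # NaN fails x == x; for the infinities, x - x produces NaN rather than 0.
    return x == x and x - x == 0

assert isfinite_sketch(1.5)
assert not isfinite_sketch(float("inf"))
assert not isfinite_sketch(float("nan"))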
[ "def isFinite(self) -> bool:\n if np.isfinite(self.data).all():\n return True\n return False", "def is_finite(self):\r\n return not self._is_special", "def is_finite(val):\n return type(val) in (float,int) and val not in (infinity, -infinity, nan)", "def is_finite(self):\n R = self.base_ring()\n if R.is_finite() and R.order() == 1:\n return True\n return False", "def isinf(x):\n return False", "def is_finite(self):\n if self.coordinates is None:\n return False\n elif self.singular:\n return np.all(np.isfinite(self.coordinates))\n elif self.coordinates.ndim == 1:\n return np.isfinite(self.coordinates)\n else:\n return self.apply_coordinate_mask_function(\n self.coordinates, csnf.check_finite)", "def floats(x):\n if isinstance(x, Number):\n return True\n try:\n float(x)\n return True\n except:\n return False", "def test_isfinite5():\n x = np.array([float(\"-inf\"), float(\"-inf\"), float(\"inf\")])\n res = np.isfinite(x)\n obj.run(res=res, x=x)", "def is_infinite(self):\n if self.coordinates is None:\n return False\n elif self.singular:\n return np.all(np.isinf(self.coordinates))\n elif self.coordinates.ndim == 1:\n return np.isinf(self.coordinates)\n else:\n return self.apply_coordinate_mask_function(\n self.coordinates, csnf.check_infinite)", "def isfinite_struct(x, names=None):\n if names is None:\n names = x.dtype.names\n return np.all([np.isfinite(x[n]) for n in names], axis=0)", "def checkFloat(self, value):\n try:\n if float(value) >= 0.0:\n return True\n else:\n return False\n except ValueError:\n return False", "def isfinite(arr):\n return np.isfinite(np.max(arr)) and np.isfinite(np.min(arr))", "def near0(x):\n return abs(x) < 0.00001", "def is_true(self) -> bool:\n if not self.is_finite:\n return False\n return not any(c == 0 for c in self)", "def isinfinite(self):\r\n return self._ranges.first.value.start == -Inf or self._ranges.last.value.end == Inf", "def is_almost_zero(\n x: float,\n num_of_exponents: int = 6) -> bool:\n if num_of_exponents < 0:\n raise ValueError('Number of exponents should be positive. '\n 'It was {}'.format(num_of_exponents))\n window_range = 10 ** -num_of_exponents\n return (x >= -window_range) and (x <= window_range)", "def is_infinitesimal(G):\n from ..tools import left_stop, right_stop\n a = left_stop(G, adorn=False) == right_stop(G, adorn=False) == 0\n b = G != 0\n return a and b", "def anyFloat(self):\n for win in self._data:\n if issubclass(win.dtype.type,np.floating):\n return True\n return False", "def check_gradients_match_finite_differences(\n self, x: np.ndarray = None, *args, **kwargs\n ) -> bool:\n if x is None and 'petab_problem' in dir(self.amici_object_builder):\n x = self.amici_object_builder.petab_problem.x_nominal_scaled\n x_free = self.amici_object_builder.petab_problem.x_free_indices\n return super().check_gradients_match_finite_differences(\n x=x, x_free=x_free, *args, **kwargs\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the base-2 logarithm of x.
def log2(x): pass
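For the log2 stub, the change-of-base identity log2(x) = ln(x)/ln(2) gives a one-line sketch (math.log also accepts the base as a second argument):

import math

def log2_sketch(x):
    # Change of base from the natural logarithm.
    return math.log(x) / math.log(2)

assert abs(log2_sketch(8) - 3.0) < 1e-12
assert abs(log2_sketch(10) - math.log2(10)) < 1e-12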
[ "def log2(x):\n ln2 = torch.log(torch.FloatTensor([2.0]))\n if x.is_cuda:\n ln2 = ln2\n return torch.log(x) / ln2", "def log(x, base=e):\n return 1.0", "def base_two_log(n):\n if n < 2:\n return 0\n else:\n return 1+base_two_log(n//2)", "def log2(num):\n if is_power_of_2(num):\n ret = 0\n while True:\n if num >> ret == 1:\n return ret\n else:\n ret += 1\n else:\n return np.log2(num)", "def max_log2(x):\n d = min(30, -np.log2(np.finfo(float).eps) - np.ceil(np.log2(x)) - 1)\n if (x + 2 ** -d) - x != 2 ** -d:\n raise ValueError('max_log2 failed')\n return d", "def log2( number):\n\n if number == 1:\n logValue = 0\n elif number == 2:\n logValue = 1\n elif number == 4:\n logValue = 2\n elif number == 8:\n logValue = 3\n elif number == 16:\n logValue = 4\n elif number == 32:\n logValue = 5\n else:\n logValue = -1\n return logValue", "def log2(s: Series):\n return np.log2(s)", "def Lin2Log(x, ratio=1.0, basis=1e3):\n import math\n level = abs(log10(x/basis))*ratio\n return level", "def Log2(x, width: int):\n if width < 2:\n return U(0)\n elif width == 2:\n return x[1]\n elif width <= divideAndConquerThreshold:\n return Mux(x[width-1], U(width-1), Log2(x, width-1))\n else:\n mid = int(1 << (clog2(width) - 1))\n hi = x[width-1:mid]\n lo = x[mid-1:0]\n useHi = reduce_or(hi, width - mid)\n return CatBits(useHi, Mux(useHi, Log2(hi, width - mid), Log2(lo, mid)))", "def closest_power_2(x):\n Max_power = int((log(x-0.1,2)))\n return 2**Max_power", "def logarithm(n):\n if n < 2:\n return 0\n else:\n return 1 + logarithm(n / 2)", "def logk(x, base=None):\n\ttry:\n\t\tval = np.log(x.val)/np.log(base)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += x.der[key]/x.val * (1/np.log(base))\n\t\t\tsec_ders[key] += (x.val*x.sec_der[key] - (x.der[key])**2)/(np.log(base) * x.val**2)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\ttry:\n\t\t\treturn np.log(x)/np.log(base)\n\t\texcept AttributeError:\n\t\t\treturn log(x)", "def log_down(x: float) -> float:\n return prev(math.log(x), LIBM_ERROR_LIMIT)", "def log_gamma(x):\n return math.lgamma(x)", "def _log2(n):\n while len(_logtable) <= n:\n _logtable.extend([1 + _logtable[-1]] * len(_logtable))\n return _logtable[n]", "def log(self, base: float = math.e) -> Series:", "def log_up(x: float) -> float:\n return next(math.log(x), LIBM_ERROR_LIMIT)", "def log_squasher(self, x):\n if self.config.log_squasher:\n x_abs = np.absolute(x).astype(float)\n x = np.multiply(np.sign(x), np.log1p(x_abs))\n return x", "def power_of_2_ge(number):\n exponent = math.log(number,2)\n if exponent != int(exponent):\n int_exp = int(exponent+1)\n return int(math.pow(2,int_exp))\n else:\n return number" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns x to the power of y.
def pow(x, y): pass
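For the pow stub, a sketch of square-and-multiply for non-negative integer exponents, taking O(log y) multiplications; a float exponent would route through exp(y*ln(x)) instead:

def pow_sketch(x, y):
    # Exponentiation by squaring, assuming integer y >= 0.
    result = 1
    while y > 0:
        if y & 1:       # odd exponent: fold one factor of x into the result
            result *= x
        x *= x          # square the base
        y >>= 1         # halve the exponent
    return result

assert pow_sketch(3, 5) == 243 and pow_sketch(2, 10) == 1024 and pow_sketch(7, 0) == 1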
[ "def pow(x, y):\n return 1.0", "def pow2(x, y=None):\n if y is None:\n z = (2. * np.ones(len(x))) ** x\n else:\n z = x * ((2. * np.ones(len(y))) ** y)\n\n return z", "def bprop_scalar_pow(x, y, out, dout):\n return (scalar_mul(dout, scalar_mul(y, scalar_pow(x, scalar_sub(y, 1)))),\n scalar_mul(dout, scalar_mul(scalar_log(x), out)))", "def two_pow(pow):\n\treturn 2**pow", "def pow_x(xs, p):\n return (x ** p for x in xs)", "def powerlawfunc(x, *p):\n return p[0] + p[1] * x ** (p[2])", "def next_pow(x, power=2):\n return pow(power, np.ceil(np.log(x) / np.log(power)))", "def pow2(n): \n return 1<<n", "def closest_power_2(x):\n Max_power = int((log(x-0.1,2)))\n return 2**Max_power", "def power2(n):\n return 2 ** n", "def power(number, exp=2):\n return number ** exp", "def power(a, b):\n\n if b == 0:\n return 1\n\n return a * power(a, (b - 1))", "def __pow__(self, r):\n return generic_power(self, r)", "def BinMult(x: BinPoly, y: BinPoly) -> BinPoly:\n res = 0\n while x:\n if x & 1:\n res ^= y\n x >>= 1\n y <<= 1\n return res", "def mult(x,y):\r\n return x*y", "def rx_power(y: PDPSegment) -> float:\r\n return y.rx_power", "def power(self):\n return self.curr * self.emf", "def mul_inverse(x, y):\n ans = ext_gcd(y, x)[2]\n if ans >= 0:\n return ans\n return ans + y", "def modularExponentiation(x: int, y: int, N: int) -> int:\n\n if y == 0:\n return 1\n \n if y % 2 == 0:\n return (modularExponentiation(x, y//2, N)**2) % N\n else:\n return (x * modularExponentiation(x, y//2, N)**2) % N", "def __pow__(self, p):\n if type(p) is int:\n return Bruch(self.zaehler ** p, self.nenner ** p)\n else:\n raise TypeError('incompatible types:' + type(p).__name__ + ' is not int')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum the contents of the vector. >>> v = ... >>> s = v.abssum()
def _ve_asum_ ( s ) : return Ostap.Math.abssum ( s )
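Ostap.Math.abssum comes from the Ostap HEP toolkit; for readers without it, the documented behavior is simply a sum of absolute values, sketched here on a plain sequence:

def abssum_sketch(v):
    # Sum of |element| over the vector's contents.
    return sum(abs(e) for e in v)

assert abssum_sketch([1.0, -2.5, 3.0]) == 6.5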
[ "def sum(x):\n\treturn np.sum(x)", "def get_vsum(vk, skm, omega):\n summand = 0.5*vk*skm\n vsum = 1/omega* summand.sum()\n return vsum", "def summation(values: np.array) -> float:\n value = np.sum(values)\n return value", "def sum(sequence):\n return __builtin__.sum(sequence)", "def back_sub_sum(A_vec, x_vec, row_num, n_cols):\n out = 0\n for j in range(row_num+1,n_cols):\n out += A_vec[j]*x_vec[j]\n return out", "def sum_sqr_vals(self):\n\treturn numpy.sum(numpy.square(self.data))", "def vectorSum(v0,v1):\n if isinstance(v0,Iterable):\n assert isinstance(v1,Iterable)\n return [vectorSum(a,b) for (a,b) in zip(v0,v1)]\n else:\n return v0+v1", "def sum(self):\n return sum(sum(r) for r in self.data)", "def normalise(vect):\n return vect / np.sum(vect)", "def sum(xs):\r\n return reduce(add, xs)", "def s(series):\n z = len(series[0])*[0 + 0*1j]\n for elem in series:\n z += elem\n return z", "def sum(self):\n return np.sum(self.data)", "def vectorsum(x, y):\n s = []\n for xx, yy in zip(x, y):\n s.append(xx + yy)\n return s", "def sum(self):\n\n return sum(self._values)", "def sum_(args):\n return sum(args)", "def zs(v):\n return (v-v.mean(axis=0))/v.std(axis=0, ddof=1)", "def comp_sum(vectors):\n weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))\n weighted_vectors = []\n for i, weight in enumerate(weight_vector):\n weighted_vectors.append(vectors[i] * weight)\n composed_vector = np.sum(weighted_vectors, axis=0)\n\n return composed_vector", "def func_signal_mag_vector(a):\n sma = np.sqrt(np.nansum(np.power(a, 2))) / len(a)\n return sma", "def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___iadd__(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Equality for ValueWithError objects >>> a = VE( ... ) >>> b = VE( ... ) >>> print a == b
def _ve_eq_ ( self , other ) :
    if isinstance ( other , VE ) :
        v1 = self .value()
        v2 = other.value()
        return _is_equal_ ( v1 , v2 ) and _is_equal_ ( self.cov2() , other.cov2() )
    elif _is_zero_ ( self.cov2() ) :
        return _is_equal_ ( float ( self ) , float ( other ) )
    else :
        raise NotImplementedError ( ' Equality for %s and %s is not implemented' % ( self , other ) )
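The semantics above: two value-with-error objects are equal only when both the values and the squared uncertainties (cov2) agree within tolerance. The helpers are Ostap internals; a plausible stand-in using math.isclose, with assumed tolerances:

import math

def _is_equal_(a, b):
    # Assumed tolerance comparison; Ostap's real helper may use different bounds.
    return math.isclose(a, b, rel_tol=1e-9, abs_tol=1e-12)

# Same value, same cov2 -> equal; same value, different cov2 -> not equal.
print(_is_equal_(1.0, 1.0) and _is_equal_(0.04, 0.04))  # True
print(_is_equal_(1.0, 1.0) and _is_equal_(0.04, 0.09))  # False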
[ "def same_values(self, v1, v2):\n return v1 == v2", "def __eq__(self, other):\n if not isinstance(other, self._uflclass):\n return isinstance(other, (int,float)) and other == self._value\n else:\n return self._value == other._value", "def myEqu(self, other):\n result = True\n if self.getValue() != other.getValue():\n result = False\n# elif self.getPartName() != other.getPartName():\n# result = False\n elif self.getFootprint() != other.getFootprint():\n result = False\n\n return result", "def __eq__(self, other):\n if isinstance(other, Register):\n return self.value == other.value\n return self.value == other", "def test_eq(self):\n # events are equal if the have the same public_id\n # Catch warnings about the same different objects with the same\n # resource id so they do not clutter the test output.\n with warnings.catch_warnings() as _:\n warnings.simplefilter(\"ignore\")\n ev1 = Event('id1')\n ev2 = Event('id1')\n ev3 = Event('id2')\n self.assertTrue(ev1 == ev2)\n self.assertTrue(ev2 == ev1)\n self.assertFalse(ev1 == ev3)\n self.assertFalse(ev3 == ev1)\n # comparing with other objects fails\n self.assertFalse(ev1 == 1)\n self.assertFalse(ev2 == \"id1\")", "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def __eq__(self, other):\n return (isinstance(other, self.__class__)\\\n and (self._lhs == other._lhs) \\\n and (self._rhs == other._rhs) \\\n and (self._phi_c == other._phi_c) )", "def testEquals(self):\n vf1 = VidFeed()\n vf2 = VidFeed()\n vf3 = VidFeed()\n\n vf1.feed_url = '127.0.0.1'\n vf2.feed_url = '127.0.0.1'\n vf3.feed_url = '192.168.1.1'\n\n self.assertEqual(vf1, vf2)\n self.assertTrue(vf1 == vf2)\n self.assertFalse(vf1 == vf3)", "def __eq__(self, value):\r\n return self._key == value._key", "def __eq__(self, other: 'SumVariant') -> bool:\n return all([\n self.variant_of is other.variant_of,\n self.name == other.name,\n self.value == other.value,\n self.constructor == other.constructor,\n self.contract == other.contract\n ])", "def __eq__(self, other):\n\n if type(other) == int:\n return self._val == other\n if type(other) == Counter:\n return self._val == other._val\n else:\n raise TypeError", "def test_equal_on_equal(self):\n a = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n b = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __eq__(self,other):\n if isinstance(other, self.__class__):\n if self.test_code == other.test_code:\n #Implement loose equality for specific test codes\n if self.test_code.code in ['CD4', 'CD4%', 'CD8', 'CD8%']:\n return abs(int(float(self.result_item_value))-int(float(other.result_item_value))) <= 1\n else:\n return self.result_item_value == other.result_item_value\n else:\n return False \n else:\n return False", "def _ve_ne_ ( self , other ) :\n try: \n return not self == other\n except NotImplementedError :\n raise NotImplementedError ( ' Inequality for %s and %s is not implemented' % ( self , other ) )", "def testEqualityWithResultRow(self):\n other = sqlresult.ResultRow(self.names, self.values)\n self.assertNotEqual(id(self.row), id(other))\n self.assertEquals(self.row, other)", "def test_eq_not_identical(self):\n loc1 = SimpleLocation(22, 42, 1)\n loc2 = SimpleLocation(23, 42, 1)\n self.assertNotEqual(loc1, loc2)\n\n loc1 = SimpleLocation(23, 42, 1)\n loc2 = SimpleLocation(23, 43, 1)\n self.assertNotEqual(loc1, loc2)\n\n loc1 = SimpleLocation(23, 42, 1)\n loc2 = SimpleLocation(23, 42, -1)\n self.assertNotEqual(loc1, loc2)\n\n 
loc1 = SimpleLocation(23, 42, 1)\n loc2 = (23, 42, 1)\n self.assertNotEqual(loc1, loc2)\n\n loc1 = SimpleLocation(23, 42, 1, \"foo\")\n loc2 = SimpleLocation(23, 42, 1, \"bar\")\n self.assertNotEqual(loc1, loc2)\n\n loc1 = SimpleLocation(23, 42, 1, \"foo\", \"bar\")\n loc2 = SimpleLocation(23, 42, 1, \"foo\", \"baz\")\n self.assertNotEqual(loc1, loc2)", "def __eq__(self, other):\r\n return self.curvature_str == other.curvature_str", "def test_equivalent(self):\n op1 = And(BoolVar(), PedestriansCrossingRoad())\n op2 = And(PedestriansCrossingRoad(), BoolVar())\n op3 = And(DriversAwaitingGreenLightVar(), BoolVar())\n\n op1.check_equivalence(op2)\n op2.check_equivalence(op1)\n\n assert_raises(AssertionError, op1.check_equivalence, op3)\n assert_raises(AssertionError, op2.check_equivalence, op3)\n assert_raises(AssertionError, op3.check_equivalence, op1)\n assert_raises(AssertionError, op3.check_equivalence, op2)\n\n ok_(op1 == op2)\n ok_(op2 == op1)\n ok_(op1 != op3)\n ok_(op2 != op3)\n ok_(op3 != op1)\n ok_(op3 != op2)", "def test_equality(self):\n\n SAMPLE = struct.Sentinel('SAMPLE')\n assert SAMPLE.name == \"SAMPLE\"\n\n SAMPLE2 = struct.Sentinel('SAMPLE')\n assert SAMPLE == SAMPLE2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inequality for ValueWithError objects
>>> a = VE( ... )
>>> b = VE( ... )
>>> print a != b
def _ve_ne_ ( self , other ) :
    try:
        return not self == other
    except NotImplementedError :
        raise NotImplementedError ( ' Inequality for %s and %s is not implemented' % ( self , other ) )
[ "def test_notequal(self):\r\n f1 = Fraction(1, 3)\r\n f2 = Fraction(1, 7)\r\n f3 = Fraction(-3, -9)\r\n self.assertFalse(f1 != f1)\r\n self.assertTrue(f1 != f2)\r\n self.assertFalse(f1 != f3)\r\n self.assertTrue(f2 != f3)\r\n self.assertTrue(f1 != Fraction(-1, 3))\r\n self.assertFalse(f1 != Fraction(-1, -3))", "def _ve_eq_ ( self , other ) :\n if isinstance ( other , VE ) :\n v1 = self .value()\n v2 = other.value()\n return _is_equal_ ( v1 , v2 ) and _is_equal_ ( self.cov2() , other.cov2() )\n elif _is_zero_ ( self.cov2() ) :\n return _is_equal_ ( float ( self ) , float ( other ) ) \n else :\n raise NotImplementedError ( ' Equality for %s and %s is not implemented' % ( self , other ) )", "def __ne__(self, other):\n self._typecheck_other(other)\n if np.isscalar(self._ders):\n if np.isscalar(other._ders):\n return self._val != other._val or self._ders != other._ders\n else:\n raise TypeError('Can not compare a scaler Ad_Var and a vector Ad_Var')\n else:\n if np.isscalar(other._ders):\n raise TypeError('Can not compare a scaler Ad_Var and a vector Ad_Var')\n else:\n return (self._val != other._val) or (self._ders != other._ders).any()", "def test_equal_on_not_equal_value(self):\n a = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n b = objects.SecretData(self.bytes_b, enums.SecretDataType.PASSWORD)\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __ne__(self, other):\r\n return self.curvature_str != other.curvature_str", "def test_equal_false(self):\n config1 = Config({'foo': {'bar': 'baz'}})\n config2 = Config({'foo': {'bar': 'bza'}})\n self.assertFalse(config1 == config2)", "def test_not_equal_on_equal(self):\n a = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n b = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def myEqu(self, other):\n result = True\n if self.getValue() != other.getValue():\n result = False\n# elif self.getPartName() != other.getPartName():\n# result = False\n elif self.getFootprint() != other.getFootprint():\n result = False\n\n return result", "def test_equality_fail_type(self):\r\n self.car1.car_type = \"Coupe\"\r\n self.assertFalse(self.car1.equality(self.car3))", "def test_assertNotEqual_unequal(self):\n for first, second in self.unequal_pairs:\n try:\n self.assertNotEqual(first, second)\n except:\n raise AssertionError, \\\n \"unit_test.assertNotEqual failed on input %s and %s\" \\\n % (`first`, `second`)", "def test_unequal(self):\n\t\tid1 = self.generator.generate()\n\t\tid2 = self.generator.generate()\n\t\tself.assertNotEqual(id1, id2)", "def test_not_equal_on_not_equal_value(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128b)\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_equal_on_not_equal_value(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128b)\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def test_equality(self):\n self.assertEqual(self.atom1, self.atom1)\n self.assertNotEqual(self.atom1, self.atom2)\n self.assertNotEqual(self.atom1, self.atom3)\n self.assertNotEqual(self.atom1, self.atom4)", "def __ne__(self, r):\n return not self.__eq__(r)", "def vm_impl_not_equal(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n 
y = y.asnumpy()\n out = vm.not_equal(x, y)\n return Tensor(np.array(out))\n\n return vm_impl", "def test_assert_not_almost_equal(self):\n self.assertNotAlmostEqual(3.1, 3.3)", "def test_assertNotSameObj_true(self):\n self.assertNotSameObj(\"foo\", \"bar\")\n self.assertNotSameObj(None, 5)\n self.assertNotSameObj(lambda x:5, lambda y:6)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
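The `_ve_ne_` document above derives `!=` by negating `==` and re-raises when equality itself is undecidable. A minimal, self-contained sketch of that pattern — the `ValueWithError` class here is a hypothetical stand-in, not taken from the source:

```python
# Hypothetical VE-like class illustrating the _ve_ne_ pattern: inequality is
# the negation of equality, and an undecidable comparison propagates as
# NotImplementedError instead of silently returning a wrong answer.
class ValueWithError(object):
    def __init__(self, value, cov2):
        self.value, self.cov2 = value, cov2

    def __eq__(self, other):
        if isinstance(other, ValueWithError):
            return self.value == other.value and self.cov2 == other.cov2
        raise NotImplementedError("Equality for %s and %s is not implemented" % (self, other))

    def __ne__(self, other):
        try:
            return not self == other
        except NotImplementedError:
            raise NotImplementedError("Inequality for %s and %s is not implemented" % (self, other))

a = ValueWithError(1.0, 0.01)
b = ValueWithError(2.0, 0.04)
assert a != b
```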
Hashing function for VE objects
>>> v = VE ( ... )
>>> h = hash ( v )
def _ve_hash_ ( v ) :
    return hash ( ( v.value() , v.cov2() ) )
[ "def get_hash(self, descriptor):", "def hash(self, object):\r\n # TODO: can we add overflow support for collisions?\r\n return md5.new(repr(object)).hexdigest()", "def hash_vector(self, v, querying=False):\n raise NotImplementedError", "def __hash__(self):\n # we assume the largest number of faces is 4, face id is 1-4, need 0-3\n hval = self.element.id*4+(self.id-1)\n return hval", "def __hash__(self):\n # This computes the hash of the lhs name\n h = hash(self.lhs)\n\n # Then combine the hash by XOR the hash of each RHS symbol\n for rhs in self.rhs_list:\n h ^= hash(rhs)\n\n return h", "def __hash__(self):", "def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)", "def __hash__(self):\n return hash(self.name + self.description)", "def _hash(data):\r\n hash_algo = hashlib.new('md5')\r\n hash_algo.update(pickle.dumps(data))\r\n # prefix allows possibility of multiple applications\r\n # sharing same keyspace\r\n return 'esi_' + hash_algo.hexdigest()", "def __hash__(self):\n # Since hash itself is integer type\n h = 0\n for item in self.item_set:\n h ^= hash(item)\n\n return h", "def hash(self):\n return self.wh", "def __hash__(self):\n return hash(('Species', self.fingerprint))", "def __hash__(self):\n\n return hash(\n (self.__class__, ) + self._defining_values\n )", "def __hash__(self):\n if self.is_scalar():\n return hash(self.real)\n elif self.is_complex():\n return hash(complex(self.real, self.get_imag()))\n else:\n return hash((self.real, self.i, self.j, self.k))", "def __hash__(self) -> hash:\n if self.empty:\n return hash(())\n else:\n return hash((self.data, self.left, self.right))", "def __hash__(self) -> \"size_t\":\n return _coin.SoBase___hash__(self)", "def __hash__(self):\n return hash((self.bike.public_key, self.remote))", "def __hash__(self):\n return hash(self.__dn__)", "def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
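Hashing on the `(value, cov2)` tuple keeps the hash consistent with value-based equality, so VE objects can serve as dict keys or set members. A short sketch under that assumption (the class below is hypothetical):

```python
# Minimal sketch of hashing by the (value, cov2) tuple, as _ve_hash_ does.
# The key point is that __hash__ agrees with value-based __eq__, so equal
# objects land in the same dict/set bucket.
class VE(object):
    def __init__(self, value, cov2):
        self._value, self._cov2 = value, cov2

    def value(self):
        return self._value

    def cov2(self):
        return self._cov2

    def __eq__(self, other):
        return (self._value, self._cov2) == (other.value(), other.cov2())

    def __hash__(self):
        return hash((self.value(), self.cov2()))

seen = {VE(1.0, 0.01): "first"}
assert seen[VE(1.0, 0.01)] == "first"  # an equal object hashes identically
```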
Get a Gaussian random number
>>> v = ...   ## the number with error
>>> ...       ## get 100 random numbers
def _ve_gauss_ ( s , accept = lambda a : True , nmax = 1000 ) :
    #
    if 0 >= s.cov2() or iszero ( s.cov2 () ) : return s.value()   ## return
    #
    v = s.value ()
    e = s.error ()
    #
    for i in range ( nmax ) :
        r = _gauss ( v , e )
        if accept ( r ) : return r
    logger.warning("Can't generate proper random number %s" % s )
    return v
[ "def gaussian( x, mu, var):\n\treturn np.exp(-np.power(x - mu, 2.) / (2 * np.power(var, 2.)))", "def SpecialGauss(self,mean, sigma):\n rand = 10.0 * sigma\n while abs(rand) > 2.0 * sigma:\n rand = random.gauss(0,sigma)\n return(rand + mean)", "def gaussian(x, mu, sigma):\r\n return exp(- ((x - mu) ** 2) / (2 * (sigma ** 2)))", "def gaussian(x, mean, std):\n return (1/(std*np.sqrt(2*np.pi))) * np.exp(-0.5*np.square((x-mean)/std))", "def testGaussian(self):\n random.seed(42)\n\n us = UniformSample()\n for _ in range(300):\n us.update(random.gauss(42.0, 13.0))\n self.assertAlmostEqual(us.mean, 43.143067271195235, places=5)\n self.assertAlmostEqual(us.stddev, 13.008553229943168, places=5)\n\n us.clear()\n for _ in range(30000):\n us.update(random.gauss(0.0012, 0.00005))\n self.assertAlmostEqual(us.mean, 0.0012015284549517493, places=5)\n self.assertAlmostEqual(us.stddev, 4.9776450250869146e-05, places=5)", "def gauss_distribution(x, mu, sigma):\n return 1 / (2 * math.sqrt(math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2)", "def gaussian(mu, sigma, x):\n return np.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np.pi * (sigma ** 2))", "def gaussian_function(x, sigma):\n return (1 / (sigma * math.sqrt(2 * math.pi))) * (math.exp(-(x ** 2) / (2 * sigma ** 2)))", "def gaussian(self, mu, sigma, x):\n return np.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np.pi * (sigma ** 2))", "def gauss(x, mu, sigma):\n return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / sigma / np.sqrt(2 * np.pi)", "def gaussian(self):\n i=0\n variance=1\n while (i < self.npoints):\n \n \n self.data[i] = np.random.normal(self.mean, variance)\n self.like[i] = 1/(2*np.pi)*np.exp(-(self.data[i,0]-self.mean[0])**2/2*variance**2)*np.exp(-(self.data[i,1]-self.mean[1])**2/2*variance**2)\n i+=1\n self.volume = math.pi**(self.dim/2)*(variance*2)**(self.dim)/math.gamma(self.dim/2+1) # the factor around variance is the number of sigma\n self.maindensity=self.npoints/self.volume \n print(\"main density is\", self.maindensity)", "def breit_wigner_random(rnd, mean, gamma) :\n rval = 2.*rnd - 1\n displ = 0.5*gamma*atfi.tan(rval*math.pi/2.)\n return mean + displ", "def gaussian_sampler(mean, stddev, batch_size):\n return tf.random_normal(\n [batch_size], mean=mean, stddev=stddev, dtype=Args.data_type\n )", "def radial_gaussian_nb(result, r, sigma):\n for i in range(len(result)):\n result[i] = math.exp(-r[i] ** 2. / 2. / sigma ** 2.)", "def gaussian_curve(x, a, m, s):\n return a * scipy_norm.pdf(x, loc=m, scale=s)", "def __reward_Gaussian(self, x):\n return np.exp(-x*x/2.0)", "def sample_exponential(lambd: float) -> float:\n return -log(random.random()) / lambd", "def gaussian_log_prob(self, x, mu, sigma):\n val = tf.math.log((1/(sigma*tf.cast(tf.sqrt(2*pi), self.dtype))))\n val = val - 0.5*((x-mu)/sigma)**2 \n val = tf.reduce_sum(val, axis=0)\n return(val)", "def error(self):\n return np.random.normal(scale=self.sd, size=1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
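`_ve_gauss_` is rejection sampling around a Gaussian: draw variates until one passes the `accept` predicate, falling back to the central value after `nmax` failures. A stdlib-only sketch of the same technique (function and parameter names here are assumptions):

```python
import math
import random

# Stdlib-only sketch of the rejection-sampling technique used by _ve_gauss_:
# draw Gaussian variates around (value, error) until one satisfies accept(),
# returning the central value if the spread is degenerate or nmax draws fail.
def gauss_with_accept(value, error, accept=lambda a: True, nmax=1000):
    if error <= 0 or math.isclose(error, 0.0):
        return value  # degenerate case: no spread, return the central value
    for _ in range(nmax):
        r = random.gauss(value, error)
        if accept(r):
            return r
    return value  # could not satisfy the predicate within nmax draws

# Example: sample a strictly positive number from N(1.0, 0.5**2).
x = gauss_with_accept(1.0, 0.5, accept=lambda a: a > 0)
assert x > 0
```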
Return a host group.
def get(self, group_name):
    g = self.request.mongo_connection.shinken.hostgroups.find_one(
        {"hostgroup_name": group_name},
        {'_id': 0}
    )
    return hostgroup.HostGroup(**g)
[ "def get_hostgroup_by_name(self, name):\n group = self.db_session.query(HostGroup)\\\n .filter(HostGroup.name == name)\\\n .first()\n return group", "def get_hostgroup(hostgroup, limit = None, columns = None, extra_filter = None):\n return query(\"GET hostgroups\\nFilter: name = %s\\n\" % hostgroup,\n limit=limit, columns=columns, item_type=\"hostgroup\",\n extra_filter=extra_filter)", "def _get_hostgroup_id(self, groupname):\n\n cli_cmd = 'showhostgroup'\n out = self._execute_cli(cli_cmd)\n if re.search('Host Group Information', out):\n try:\n for line in out.split('\\r\\n')[6:-2]:\n tmp_line = line.split()\n if len(tmp_line) < 2:\n continue\n if tmp_line[1] == groupname:\n return tmp_line[0]\n except Exception:\n err_msg = (_('CLI out is not normal. CLI out: %s') % out)\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n return None", "def get_group(name: str) -> MeetupObject:\n return CLIENT.GetGroup({'urlname': name})", "def get_host_group_by_name(self, host_group_name):\n LOG.info(\"Getting hostgroup details by name: '%s'\" % host_group_name)\n return self.client.request(\n constants.GET,\n constants.GET_HOST_GROUP_BY_NAME_URL.format(self.server_ip),\n payload=None, querystring=helpers.prepare_querystring(\n constants.SELECT_ALL_HOST_GROUP,\n name=constants.EQUALS + host_group_name\n )\n )", "def getHostsinGroup(self, groupName):\n groupID = None\n groups = requests.get(self.baseURL+\"group\", headers=self.header)\n groups = groups.json()['groups']\n for group in groups:\n if groupName.lower() in group['name'].lower():\n groupID = group['id']\n if groupID is None:\n return\n hostIDs = []\n associations = requests.get(\n self.baseURL+\"groupassociation\",\n headers=self.header\n )\n associations = associations.json()['groupassociations']\n for association in associations:\n if association['groupID'] == groupID:\n hostIDs.append(association['hostID'])\n\n hosts = []\n for hostID in hostIDs:\n hosts.append(requests.get(\n self.baseURL+\"host/\"+str(hostID),\n headers=self.header\n ).json())\n return hosts", "def create(self):\n\n hostGroup = self.new()\n hostGroup.create()\n return hostGroup", "def associate(self, host, group):\n return self._assoc('groups', host, group)", "def group(self) -> click.Group:\n if self._group is None:\n self._load_group()\n return self._group", "def group(self):\r\n\r\n for group in self.all_groups:\r\n if self in group:\r\n return group\r\n return None\r\n\r\n # To get the group directly from the network, try the code below\r\n # though it is probably slower than that above\r\n # current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[\r\n # 'CurrentZoneGroupID']\r\n # if current_group_id:\r\n # for group in self.all_groups:\r\n # if group.uid == current_group_id:\r\n # return group\r\n # else:\r\n # return None\r", "def _get_process_group(self, name):\n group = self.supervisord.process_groups.get(name)\n if group is None:\n raise RPCError(SupervisorFaults.BAD_NAME, 'group: %s' % name)\n return group", "def ad_group_get(name: str) -> AdGroup:\n command: List[str] = ['az', 'ad', 'group', 'show', f'--group={name}']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n if process.returncode != 0:\n return AdGroup()\n ad_group: AdGroup = json_to_dataclass(process.stdout, AdGroup)\n # LOG.debug(f'ad_group: {ad_group}')\n return ad_group", "def get_nodegroup_by_name(self, context, cluster_id, nodegroup_name):", "def _get_port_group(self):\n return 
self.__port_group", "def test_get_hostgroups(self):\n pass", "def radius_provider_group_get(handle, name,\n caller=\"radius_provider_group_get\"):\n dn = _radius_dn + \"/providergroup-\" + name\n mo = handle.query_dn(dn)\n if mo is None:\n raise UcsOperationError(caller,\n \"Radius Provider Group'%s' does not exist\" % dn)\n return mo", "def group(self, name):\n if name is None:\n return self._groups\n try:\n return self._groups[name]\n except KeyError:\n matches = [g for g in self._groups if g.name.startswith(name)]\n if len(matches) == 1:\n return matches[0]\n raise", "def get_all(self):\n hostgroups = [g for g\n in self.request.mongo_connection.\n shinken.hostgroups.find(\n {\"register\": {\"$ne\": \"0\"}},\n {'_id': 0}\n )]\n hostgroups = [hostgroup.HostGroup(**g) for g in hostgroups]\n return hostgroups", "def generate_hgrp(self, name=None, data=None, metadata=None,\n json_string=None, uge_version=None,\n add_required_data=True):\n return self.host_group_manager.generate_object(\n name=name, data=data, metadata=metadata,\n json_string=json_string, uge_version=uge_version,\n add_required_data=add_required_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
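The controller above is a thin wrapper over PyMongo's `find_one` with a projection that drops the internal `_id`. A hedged sketch of the same pattern — the connection string and helper name are assumptions, not taken from the source:

```python
from pymongo import MongoClient

# Hedged sketch of the find_one-with-projection pattern used above;
# connection details are placeholders.
client = MongoClient("mongodb://localhost:27017")
hostgroups = client.shinken.hostgroups

def get_hostgroup(group_name):
    # The {'_id': 0} projection strips MongoDB's internal id so the result
    # maps cleanly onto a plain HostGroup(**doc) constructor call.
    return hostgroups.find_one({"hostgroup_name": group_name}, {"_id": 0})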
Modify an existing host group.
def update(self, group_name, group):
    group_dict = group.as_dict()
    if "hostgroup_name" not in group_dict.keys():
        group_dict['hostgroup_name'] = group_name
    self.request.mongo_connection.shinken.hostgroups.update(
        {"hostgroup_name": group_name},
        group_dict
    )
[ "def modify_host_group(self, host_group_id, name=None,\n remove_host_ids=None,\n add_host_ids=None, description=None):\n LOG.info(\"Modifying hostgroup: '%s'\" % host_group_id)\n payload = self._prepare_modify_host_group_payload(\n name, remove_host_ids, add_host_ids, description)\n return self.client.request(\n constants.PATCH, constants.MODIFY_HOST_GROUP_URL.format(\n self.server_ip, host_group_id),\n payload)", "def modify_hgrp(self, pycl_object=None, name=None, data=None,\n metadata=None, json_string=None):\n return self.host_group_manager.modify_object(\n pycl_object=pycl_object, name=name, data=data,\n metadata=metadata, json_string=json_string)", "def test_update_hosts_in_hostgroup(self):\n pass", "def test_update_eip_group(self):\n name = 'test_eip_group_new'\n self.client.update_eip_group(id=EIP_GRP_ID, name=name)", "def modify_hgrps(self, hgrp_list):\n return self.host_group_manager.modify_objects(hgrp_list)", "def do_portgroup_update(cc, args):\n patch = utils.args_array_to_patch(args.op, args.attributes[0])\n portgroup = cc.portgroup.update(args.portgroup, patch)\n _print_portgroup_show(portgroup, json=args.json)", "def associate(self, host, group):\n return self._assoc('groups', host, group)", "def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )", "def test_add_host_to_hostgroup(self):\n pass", "def add_host_to_group(self, name, host):\n group = self.get_hostgroup_by_name(name)\n group.hosts.append(host)\n transaction.commit()", "def update_nodegroup(self, cluster_id, nodegroup_id, values):", "def update_security_group(sg_id, name, desc):\n return IMPL.update_security_group(sg_id, name, desc)", "def delete(self, group_name):\n self.request.mongo_connection.shinken.hostgroups.remove(\n {\"hostgroup_name\": group_name}\n )", "def test_delete_host_from_group(self):\n pass", "def test_delete_host_group(self):\n pass", "def cli(env, group_id, name, description):\n mgr = SoftLayer.NetworkManager(env.client)\n data = {}\n if name:\n data['name'] = name\n if description:\n data['description'] = description\n\n if not mgr.edit_securitygroup(group_id, **data):\n raise exceptions.CLIAbort(\"Failed to edit security group\")", "def set_group(group,path):\n path = Location(path)\n if not path.is_remote:\n # Set the group for local files\n gid = bcftbx_utils.get_gid_from_group(group)\n if gid is None:\n raise Exception(\"Failed to get gid for group '%s'\" % group)\n for f in bcftbx_utils.walk(path.path,include_dirs=True):\n logger.debug(\"Updating group for %s\" % f)\n os.lchown(f,-1,gid)\n else:\n try:\n # Set group for remote files\n chmod_cmd = applications.general.ssh_command(\n path.user,\n path.server,\n ('chgrp','-R',group,path.path))\n print \"Running %s\" % chmod_cmd\n chmod_cmd.run_subprocess()\n except Exception as ex:\n raise Exception(\n \"Exception changing group to '%s' on remote \"\n \"destination %s: %s\" %\n (group,path,ex))", "def update_server_group(self, gid, **kwargs):\n body = {\"group\": {}}\n for k, v in kwargs.items():\n body['group'][k] = v\n\n return self.__post('/v1/groups/%s' % gid, body)", "async def change_hosts(host_groups):\n\n logging.info(\"Agent to change hosts is ready to receive host groups.\")\n\n async for host_group_key, host_group in host_groups.items():\n if host_group_key.startswith(\"add\"):\n # host has to be added\n # get current host group from host table\n host_group_current = host_table[host_group.ip]\n\n if not (host_group_current and host_group_current.time > host_group.time):\n # 
update host group in host table\n host_table[host_group.ip] = host_group\n\n elif host_group_key.startswith(\"delete\"):\n # host has to be deleted\n # get current host group from host table\n host_group_current = host_table[host_group.ip]\n\n if host_group_current and host_group_current.time == host_group.time:\n # delete host group from host table when has not changed\n host_table.pop(host_group_current.ip)\n\n else:\n raise Exception(\n f\"Agent to change hosts raised an exception because a host group has an unknown action. The \" \\\n f\"key of the host group is {host_group_key}.\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
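The `collection.update(...)` call in the document above is PyMongo's legacy write API, which was removed in PyMongo 4. A sketch of the modern equivalent for the same full-document update; the collection handle is assumed to come from a `MongoClient` as in the previous sketch:

```python
# replace_one swaps the whole matched document, matching the legacy
# update(filter, doc) form that used no update operators.
def update_hostgroup(hostgroups, group_name, group_dict):
    group_dict.setdefault("hostgroup_name", group_name)
    hostgroups.replace_one({"hostgroup_name": group_name}, group_dict)
```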
Delete existing host group.
def delete(self, group_name):
    self.request.mongo_connection.shinken.hostgroups.remove(
        {"hostgroup_name": group_name}
    )
[ "def test_delete_host_group(self):\n pass", "def delete_hgrp(self, name):\n return self.host_group_manager.delete_object(name)", "def test_delete_host_from_group(self):\n pass", "def delete_host_group(self, host_group_id):\n LOG.info(\"Deleting hostgroup: '%s'\" % host_group_id)\n return self.client.request(\n constants.DELETE, constants.DELETE_HOST_GROUP_URL.format(\n self.server_ip, host_group_id),\n payload=None)", "def delete_group(self, group_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_destroy_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n self.assertTrue(group.destroy())", "def deleteGroup(id):", "def test_ex_create_and_delete_empty_group(self):\r\n group = self.driver.ex_create_group('libcloud_test_group')\r\n group.destroy()", "def delete_group(self, group):\n path = \"api/groups/{0}\".format(group)\n self._delete(path)", "def delete_hgrps(self, hgrp_list):\n return self.host_group_manager.delete_object_list(hgrp_list)", "def delete(self):\n url = f'{self._okta.api}/groups/{self.id}'\n response = self._okta.session.delete(url)\n return response.ok", "def delete_nodegroup(ctx, name, region, verbosity, node_name, kubeconf):\n ng = NodeGroup(node_name, ClusterInfo(name), region=region, kubeconf=kubeconf)\n ng.delete()", "def destroy(self):\r\n return self.driver.ex_destroy_group(self)", "def Delete(iam,groupname: str):\n\t\t\t\treturn iam.resource.Group(groupname).delete()", "def delete_node_group(node_group_id):\n\n # FIXME: Support name and id or ?\n data = {'node_group_id': node_group_id}\n return api_submit('/api/node_groups/{0}'.format(node_group_id), data, method='delete')", "def ex_delete_ip_group(self, group_id):\r\n uri = '/shared_ip_groups/%s' % group_id\r\n resp = self.connection.request(uri, method='DELETE')\r\n return resp.status == httplib.NO_CONTENT", "def test_delete_services_network_group_by_network_group_name(self):\n pass", "def test_destroy_deployed_group_failed(self):\r\n self.driver = AbiquoNodeDriver('muten', 'roshi',\r\n 'http://dummy.host.com/api')\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n self.assertFalse(group.destroy())", "def test_destroy_not_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[1]\r\n self.assertTrue(group.destroy())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new host group.
def create(self, group):
    self.request.mongo_connection.shinken.hostgroups.insert(
        group.as_dict()
    )
[ "def _create_hostgroup(self, hostgroupname):\n cli_cmd = 'createhostgroup -n %(name)s' % {'name': hostgroupname}\n out = self._execute_cli(cli_cmd)\n\n self._assert_cli_operate_out('_create_hostgroup',\n ('Failed to Create hostgroup %s.'\n % hostgroupname),\n cli_cmd, out)", "def create(self):\n\n hostGroup = self.new()\n hostGroup.create()\n return hostGroup", "def create_group():\r\n new_group = input(\"| Enter the name of the Group |\")\r\n adgroup.ADGroup.create(new_group, security_enabled=True, scope='GLOBAL')\r\n return \"| Group created |\"", "def create_group(self, group, **kwargs):\n\n status, data = self.run_gerrit_command('create-group', group, **kwargs)\n\n return status, data", "def create(self):\n path = '/projects/%s/groups/' % (self.client.project,)\n info = self.client._connection.api_request(\n method='POST', path=path, data=self._to_dict())\n self._set_properties_from_dict(info)", "def create_group(self):\n group_name = self.line_grp.text().strip() # removes whitespaces from left and right\n\n if group_name == '':\n display_msg(MsgIcon.WARNING, \"Warning\", \"Please choose a group name\")\n return\n\n self.line_grp.setText(\"\")\n if self.db.insert_group(group_name): # if creation was successful:\n self.list_grp.addItem(group_name) # adds new group to the list.\n self.db.notify_stats() # update stats tab", "async def create_group(self, userid, gameid):\n raise NotImplementedError()", "def test_create_eip_group_with_name(self):\n name = 'test_eip_group'\n self.client.create_eip_group(eip_count=2,\n bandwidth_in_mbps=10,\n name=name, config=None)", "def create_group(c, runner, group):\n if group_exists(c, group, runner=runner):\n return True\n\n cmd = \"groupadd {}\".format(group)\n return runner(cmd, hide=True, warn=True).ok", "def make_group():\n try:\n poll_id = request.args.get('poll_id')\n answer = request.args.get('answer')\n name = request.args.get('name')\n group_id = poll_id + \":\" + str(answer)\n service.create_new_group(group_id, poll_id, name)\n users = answer_service.get_users_by_answer(poll_id, answer)\n group_users_service.insert_users_to_group(users, group_id)\n except Exception as e:\n print(\"ERROR: in make_group():\")\n print(e)\n\n return make_response(\n \"Failed to make_group\\n\"\n \"poll id: \" + poll_id, 500)\n return make_response(\n \"OK, in make_group:\\n\"\n \"poll id: \" + poll_id, 200)", "def handle_create(self):\r\n asclient = self.stack.clients.auto_scale()\r\n group = asclient.create(**self._get_create_args())\r\n self.resource_id_set(str(group.id))", "def test_create_group(self):\n request = {'name': 'Test group'}\n rv = self.post('/group/',\n request,\n token=self.user.token)\n self.assertJsonOk(rv, id=1)\n return", "def create_projects_group():\n sudo('addgroup projects')", "def test_add_host_to_hostgroup(self):\n pass", "def create_server_group(self, name, tag, **kwargs):\n body = {\"group\": {}}\n body['group']['name'] = name\n body['group']['tag'] = tag\n for k, v in kwargs.items():\n body['group'][k] = v\n\n return self.__post('/v1/groups', body)", "def create_address_group(self, **attrs):\n return self._create(_address_group.AddressGroup, **attrs)", "def Create(iam,groupname: str,tag='/'):\n\t\t\t\treturn iam.resource.Group(groupname).create(Path=AWS.preptag(tag))", "def do_portgroup_create(cc, args):\n field_list = ['address', 'extra', 'node_uuid', 'name', 'uuid',\n 'standalone_ports_supported', 'mode', 'properties']\n fields = dict((k, v) for (k, v) in vars(args).items()\n if k in field_list and not (v is None))\n fields = 
utils.args_array_to_dict(fields, 'extra')\n fields = utils.args_array_to_dict(fields, 'properties')\n portgroup = cc.portgroup.create(**fields)\n\n data = dict([(f, getattr(portgroup, f, '')) for f in field_list])\n cliutils.print_dict(data, wrap=72, json_flag=args.json)", "def create_default_group():\n group_entry = CommandGroupEntry.objects.create()\n return group_entry" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
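`insert()` and `remove()` in the create and delete controllers above are also legacy PyMongo methods removed in PyMongo 4. A short sketch of their modern replacements; note that legacy `remove()` deleted every matching document, so `delete_many()` is the faithful equivalent:

```python
# PyMongo 4 equivalents of the legacy insert()/remove() calls above.
def create_hostgroup(hostgroups, group_dict):
    hostgroups.insert_one(group_dict)

def delete_hostgroup(hostgroups, group_name):
    # delete_many mirrors legacy remove(), which dropped all matches.
    hostgroups.delete_many({"hostgroup_name": group_name})
```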
Return all host groups.
def get_all(self):
    hostgroups = [g for g
                  in self.request.mongo_connection.
                  shinken.hostgroups.find(
                      {"register": {"$ne": "0"}},
                      {'_id': 0}
                  )]
    hostgroups = [hostgroup.HostGroup(**g) for g in hostgroups]
    return hostgroups
[ "def list_server_groups(self):\n return self.__get('/v1/groups')", "def get_all_servicegroups(self):\n\n return self.make_request('get_all_servicegroups')", "def getgrall():\r\n groups = []\r\n while True:\r\n group = _posix_impl.getgrent()\r\n if not group:\r\n break\r\n groups.append(struct_group(group))\r\n return groups", "def get_objects(self):\n for group in openstack_clients.get_novaclient(\n ).server_groups.list(all_projects=True):\n yield group", "def test_get_hostgroups(self):\n pass", "def ls_groups(self, **kwargs):\n status, data = self.run_gerrit_command('ls-groups', **kwargs)\n\n return data.split('\\n') if status == 0 else []", "def getGroups(self):\n response = self.stub.GetGroups(show_pb2.ShowGetGroupsRequest(\n show=self.data),\n timeout=Cuebot.Timeout)\n groupSeq = response.groups\n return [opencue.wrappers.group.Group(grp) for grp in groupSeq.groups]", "def allGroups(self,\n includeCancelled,\n fetch=None):\n path = \"api/v1/group/all-groups\"\n path_params = {}\n query_params = {\"includeCancelled\": includeCancelled,\n \"fetch\": fetch}\n form_params = {}\n result = self.conn.invoke_method(\"GET\", path, path_params,\n query_params, form_params)\n if result.error:\n raise IbisException(result.error)\n return result.groups", "def groups(self) -> Sequence['outputs.ManagedNetworkGroupResponse']:\n return pulumi.get(self, \"groups\")", "def getGroups(self):\n # TODO - MNour: Objectization of returned values.\n self._validateValues(accountName=self.accountName)\n return self._callBitbucketRestAPI(BitbucketRESTCall.GROUPS, uriParts=[self.accountName])", "def getGroups(self):\n groups = list(set([plugin.group for plugin in self.values()]))\n groups.sort()\n return groups", "def machine_groups(self):\n ret = self._get_attr(\"machineGroups\")\n return ret", "def get_all_template_groups(self) -> dict:\n return self._get(\"/template/templateGroups\")", "def xmlrpc_list_groups(host, port):\n url = \"http://%s:%s\" % (host, port)\n server = ServerProxy(url)\n try:\n return server.listGroups()\n except SocketError:\n raise SocketError(\n 'No Credential server reachable at %s, use fl-credential-ctl '\n 'to start the credential server.' % url)", "def sagroups(self):\n sagroups = []\n # Get all groups\n if self.id:\n groups = self.groups.all()\n if groups:\n # Get fh groups tags\n for group in groups:\n if hasattr(group, 'sagroup'):\n sagroups.append(group.sagroup.tag)\n\n return sagroups", "def query_all_groups():\n grp = MetalGroup.query.order_by(MetalGroup.level).all()\n return grp", "def _get_groups(self):\n if self._groups is None:\n self._groups = PlatoUserGroups(self)\n\n return self._groups", "def get_all_hosts(self):\n logging.debug('REPOSITORY: Getting all hosts')\n hosts = list()\n # Add hosts from the SSH config file\n hosts.extend(self.get_system_ssh_config_hosts())\n hosts.extend(self.get_user_ssh_config_hosts())\n # Add hosts from the saved config file\n hosts.extend(self.get_hosts())\n return hosts", "def get_group_list(self, globs):\n # special cases to speed things up:\n return self._expand_globs(globs,\n list(self.core.metadata.groups.keys()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
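The `{"register": {"$ne": "0"}}` filter above skips template objects — Nagios/Shinken configuration marks templates with `register 0`. A small sketch of the same cursor-materialisation pattern (helper name assumed):

```python
# Sketch of the list-materialisation pattern: a $ne filter keeps only
# registered objects (templates carry register == "0"), and the projection
# drops _id so each document can unpack into a model class via **doc.
def get_all_hostgroups(hostgroups):
    cursor = hostgroups.find({"register": {"$ne": "0"}}, {"_id": 0})
    return [doc for doc in cursor]
```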
Test default password store discovery in command line interface.
def test_cli_defaults(self):
    with MockedHomeDirectory() as home:
        touch(os.path.join(home, ".password-store", "the-only-entry.gpg"))
        returncode, output = run_cli(main, "-l")
        assert returncode == 0
        entries = output.splitlines(False)
        assert entries == ["the-only-entry"]
[ "def test_password_prompt(self, fake_getpass, fake_stderr):\n cli_args = ['--clusters', 'myCluster', '--location', '/foo', '--username', 'pat']\n\n iiqtools_cluster_backup.parse_args(cli_args)\n\n fake_getpass.assert_called()", "def test_config_nopass_askpass(fakeClient, tmpconfigfile, monkeypatch):\n\n def mockgetpass(prompt='Password: '):\n return \"mockpass\"\n monkeypatch.setattr(getpass, \"getpass\", mockgetpass)\n\n args = [\"-c\", str(tmpconfigfile.path), \"-s\", \"example_nbour\", \"-P\"]\n _, conf = icat.config.Config(args=args).getconfig()\n\n ex = ExpectedConf(configFile=[tmpconfigfile.path],\n configSection=\"example_nbour\",\n url=ex_icat,\n auth=\"ldap\",\n username=\"nbour\",\n password=\"mockpass\",\n promptPass=True,\n credentials={'username': 'nbour', 'password': 'mockpass'})\n assert ex <= conf", "def test_init_v3_password(self):\r\n\r\n self._stubs_v3(method='password')\r\n self.m.ReplayAll()\r\n\r\n ctx = utils.dummy_context()\r\n ctx.auth_token = None\r\n ctx.trust_id = None\r\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\r\n client = heat_ks_client.client\r\n self.assertIsNotNone(client)", "def test_all_command_line():\n assert read_settings('abc 123 -p testpre'.split()) == \\\n {'oauth_token': 'abc',\n 'oauth_secret': '123',\n 'app_key': 'RWmvpkGK4m9tavh4bCfdzsYjH',\n 'app_secret': 'uCShewTskeuBvt9haLi8LFARSJXkxJsCPNZ3dGwpYz4vuc5Mo9',\n 'config': 'stwark.cfg',\n 'prefix': 'testpre'}", "def test_vmware_service_resources_vm_password_get(self):\n pass", "def _load_pass(self):\n return keyring.get_password('PyBox', self.cfg.get('user', ''))", "def test_config_askpass(fakeClient, tmpconfigfile, monkeypatch):\n\n def mockgetpass(prompt='Password: '):\n return \"mockpass\"\n monkeypatch.setattr(getpass, \"getpass\", mockgetpass)\n\n args = [\"-c\", str(tmpconfigfile.path), \"-s\", \"example_root\",\n \"-a\", \"db\", \"-u\", \"rbeck\"]\n _, conf = icat.config.Config(args=args).getconfig()\n\n ex = ExpectedConf(configFile=[tmpconfigfile.path],\n configSection=\"example_root\",\n url=ex_icat,\n idsurl=ex_ids,\n auth=\"db\",\n username=\"rbeck\",\n password=\"mockpass\",\n promptPass=False,\n credentials={'username': 'rbeck', 'password': 'mockpass'})\n assert ex <= conf", "def init_password(self):\n\t\tself.vm_password = raw_input(\"Please enter vm password \")", "def test_empty_password_store_error(self):\n with TemporaryDirectory() as directory:\n program = PasswordStore(directory=directory)\n self.assertRaises(EmptyPasswordStoreError, program.smart_search)", "def password(self):\n return \"\"\"--password[=password]\"\"\"", "def prompt_for_password(args):\n if not args.password:\n args.password = getpass.getpass(\n prompt='Enter password for host %s and user %s: ' %\n (args.host, args.user))\n\n return args", "def test_ask_question__password(self, _):\n input_value = self.user_manager.ask_question('field', password=True)\n\n self.assertEqual(input_value, 'password')", "def test_set_random_initial_password(self):\n response = self.admin_client.post(\n reverse(\"user-list\"), {\"username\": \"Test name 9gt043qwvnj2d0cr\"}\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n user = User.objects.get(username=\"Test name 9gt043qwvnj2d0cr\")\n self.assertTrue(isinstance(user.default_password, str))\n self.assertTrue(len(user.default_password) >= 8)\n self.assertTrue(user.check_password(user.default_password))", "def test_get_password(self):\n random_password = random_string()\n entry = PasswordEntry(name=\"some/random/password\", 
store=object())\n set_property(entry, \"text\", \"\\n\".join([random_password, \"\", \"This is the description\"]))\n self.assertEquals(random_password, entry.password)", "def test_azure_service_api_vm_password_get(self):\n pass", "def test_014(self):\n\n HEADING()\n result = run(\"cm key default testkey\")\n print (result)\n assert \"OK.\" in result", "def test_password_exist(dbtransaction, auth_env):\n assert os.environ.get('AUTH_PASSWORD', 'muniri') is not None", "def test_cmd_mask_password(self):\n hook = SqoopHook()\n assert hook.cmd_mask_password([\"--password\", \"supersecret\"]) == [\"--password\", \"MASKED\"]\n\n cmd = [\"--target\", \"targettable\"]\n assert hook.cmd_mask_password(cmd) == cmd", "def test_not_ask_password_when_asked_but_password_is_set(mock_factory, getpass_mock):\n # force ask the password\n config = {\n 'ssh': {\n 'passwd': '',\n 'ask_passwd': True,\n },\n }\n\n task = ssh_passwd.SSHPassword(mock_factory(), config)\n\n # trigger action\n task.pre_start()\n\n getpass_mock.assert_not_called()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
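The test above invokes the CLI entry point in-process through a `run_cli` helper. The underlying technique — patching `sys.argv`, capturing stdout, and translating `SystemExit` into a return code — can be sketched with the stdlib alone; this is a simplification and not the real helper's implementation:

```python
import contextlib
import io
import sys

# Stdlib-only sketch of in-process CLI invocation, a simplified analogue of
# the run_cli() helper used in the tests above.
def invoke_cli(entry_point, *arguments):
    saved_argv = sys.argv
    stdout = io.StringIO()
    sys.argv = ["program"] + list(arguments)
    try:
        with contextlib.redirect_stdout(stdout):
            try:
                entry_point()
                returncode = 0
            except SystemExit as exc:
                # exc.code may be None, an int, or a message string.
                if exc.code is None:
                    returncode = 0
                elif isinstance(exc.code, int):
                    returncode = exc.code
                else:
                    returncode = 1
    finally:
        sys.argv = saved_argv
    return returncode, stdout.getvalue()
```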
Test error handling of invalid command line options.
def test_cli_invalid_option(self):
    returncode, output = run_cli(main, "-x", merged=True)
    assert returncode != 0
    assert "Error:" in output
[ "def test_check_options_exception(self, hp, opts):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_is_in_options(hp, opts, msg=\"XXX\")", "def test_cli_option_errors(self):\n stderr = self.getCliErrorMessages(\n args=[\"__non_existent_wrapper__\", \"__non_existent_script__\"]\n )\n self.assertIn(\n \"Could not resolve '__non_existent_wrapper__'\",\n stderr,\n \"Wrong invalid option message\",\n )", "def test_missing_arg(self):\n parser, config_dict = set_args()\n with self.assertRaises(SystemExit):\n args = parser.parse_args(self.cmd_args[9])", "def ValidateOptions(self, opt, args):", "def wrong_option():\n return '-1'", "def test_tool_fail(self):\n args = parse_args(\"tests wrong_tool_spec\", use_shlex=True)\n self.assertEqual(args, 64)", "def check_required_option( option, string, exit_on_failure = False ):\n if option is None:\n print( string )\n if exit_on_failure:\n print( \"Exiting program due to above failures\" )\n sys.exit( 0 )", "def opt_validate (optparser):\n (options,args) = optparser.parse_args()\n if not options.fqfilename:\n optparser.print_help()\n sys.exit(1)\n if not options.species:\n optparser.print_help()\n sys.exit(1)\n if not options.dirOut:\n optparser.print_help()\n sys.exit(1)\n return options", "def test_invalid(sourcextractor):\n run = sourcextractor('--this-is-not-a-valid-flag')\n assert run.exit_code > 0\n assert 'unrecognised' in run.stderr", "def test_main_if_check_args(self):\n\n sys.argv[1:] = [1, 2, 3, 4]\n with self.assertRaises(SystemExit) as ctx:\n main()\n self.assertEqual(1, ctx.exception.code)", "def _reportCommandLineUsageErrorAndExit(parser, message):\n print parser.get_usage()\n print message\n sys.exit(1)", "def test_nested_exclusive_option_groups(self):\n self.assertRaises(SystemExit,\n self._test_options, [\"--test1\", \"--test2\"])", "def validate_args(argv):\n\tif len(argv) < 2:\n\t\tprint \"Insufficient command line arguments\"\n\t\tusage()\n\t\tsys.exit(-1)\n\tif len(argv) > 2:\n\t\tprint \"Too many command line arguments, extra arguments ignored\"", "def test_bad_args():\n\n rv, out = getstatusoutput(f'{prg} \"{bad_input1}\"')\n assert rv != 0\n error_string = \"Bad nucleotide sequence. Only ATCG allowed.\"\n assert re.findall(error_string, out, re.IGNORECASE)", "def test_bluetoothctl_with_invalid_args(self):\n\n output='Too many arguments: 2 > 1'\n self.assertEqual(parse(output, quiet=True), [])", "def test_negativeHostIndex(self):\n exc = self.assertRaises(\n UsageError,\n self.options.parseOptions,\n [\"--host-index=-1\", \"--hosts-count=2\", \"foo\"])\n self.assertEquals(\n str(exc),\n \"Specify a positive integer for host-index\")", "def test_missing_args():\n assert run([\"man\"]).stderr.startswith(\"man: Missing argument\\n\")", "def test_invalid_verbosity(self):\n v1 = _random_integer(start=4)\n args = parse_args(\"tests all -v {}\".format(v1), use_shlex=True)\n self.assertEqual(args, 64)\n v2 = -_random_integer(start=0)\n args = parse_args(\"tests all -v {}\".format(v2), use_shlex=True)\n self.assertEqual(args, 64)", "def _test_parse_args_fails(self, args: str) -> None:\n with self.assertRaises(OatmealParseError):\n OatmealMsg._parse_args(args.encode('ascii'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the output of ``qpass list``.
def test_cli_list(self):
    with TemporaryDirectory() as directory:
        touch(os.path.join(directory, "foo.gpg"))
        touch(os.path.join(directory, "foo/bar.gpg"))
        touch(os.path.join(directory, "Also with spaces.gpg"))
        returncode, output = run_cli(main, "--password-store=%s" % directory, "--list")
        assert returncode == 0
        entries = output.splitlines()
        assert "foo" in entries
        assert "foo/bar" in entries
        assert "Also with spaces" in entries
[ "def test_list(self):\n stdout = six.StringIO()\n Switch.objects.create(name='switch1', active=True)\n Switch.objects.create(name='switch2', active=False)\n\n call_command('waffle_switch', list_switches=True, stdout=stdout)\n expected = 'Switches:\\nswitch1: on\\nswitch2: off'\n actual = stdout.getvalue().strip()\n self.assertEqual(actual, expected)", "def test_session_list(self):\n i,o,e = self.session.list()\n output = o.read(1024)\n self.assertTrue(output != '',\"output of list command is empty\")", "def test_list(self):\n stdout = six.StringIO()\n Flag.objects.create(name='test')\n\n call_command('waffle_flag', list_flags=True, stdout=stdout)\n expected = 'Flags:\\nNAME: test\\nSUPERUSERS: True\\nEVERYONE: None\\n' \\\n 'AUTHENTICATED: False\\nPERCENT: None\\nTESTING: False\\n' \\\n 'ROLLOUT: False\\nSTAFF: False\\nGROUPS: []'\n actual = stdout.getvalue().strip()\n self.assertEqual(actual, expected)", "def test_14_quer_prs_list(self):\n self.cmd_out = ['abcd']\n out = self.conn.quer(prs=('123', '456'))\n self.assertEquals(self.cmd_in, 'QUER 123 456')\n self.assertEquals(self.parse, False)\n self.assertEquals(out, ['abcd'])", "def testEchoList(self):\n e = Executor()\n result = e.execute([\"echo\", \"hello\"])\n self.assertEqual(\"hello\", result.stdout.strip())\n self.assertTrue(\"$ echo hello\" in e.log)", "def test_new_with_list_hashable():\n hash(Command(['echo', 'spam']))", "def test_list(self):\n stdout = six.StringIO()\n Sample.objects.create(name='test', percent=34)\n\n call_command('waffle_sample', list_samples=True, stdout=stdout)\n expected = 'Samples:\\ntest: 34.0%'\n actual = stdout.getvalue().strip()\n self.assertEqual(actual, expected)", "def test_list_contexts(self, mock_stdout_write):\n next_action()\n self.assertEqual(\n [call(\"@park\"), call(\"\\n\")], mock_stdout_write.call_args_list)", "def test_list_iterables(data, result_one):\n test_list = que_.Queue(data)\n assert len(test_list) == result_one", "def test_list_accepted_args(salt_key_cli, key_type):\n # Should not trigger any error\n ret = salt_key_cli.run(\"-l\", key_type)\n assert ret.returncode == 0\n assert \"error:\" not in ret.stdout\n # Should throw an error now\n ret = salt_key_cli.run(\"-l\", f\"foo-{key_type}\")\n assert ret.returncode != 0\n assert \"error:\" in ret.stderr", "def test_password_verifier_works(password):\n (input, result) = password\n print '\\n'\n print 'Inputs->' , input\n print 'Request->', result\n assert check_password(input) == result", "def test_alias_list_debug(self):\n config = {\n 'ALIASES': {\n 'alias1': 'command1',\n 'alias2': 'command2',\n }\n }\n\n with self.reviewboardrc(config, use_temp_dir=True):\n alias = self._create_alias_command(args=['--list', '-d'])\n self.assertTrue(alias.options.list_aliases)\n self.assertTrue(alias.options.debug)", "def test_prompting(self):\n pass", "def test_cli_list_plugins_aux(self):\n expected = [\n \"Available AUXILIARY plugins\",\n \"exploit\",\n \"smb\",\n \"bruteforce\",\n \"dos\",\n \"se\",\n \"rce\",\n \"selenium\",\n ]\n\n self.run_owtf(\"-l\", \"auxiliary\")\n self.assert_are_in_logs(expected, name=\"MainProcess\")", "def test_list_queries(self):\n pass", "def test_simple(self):\n known = []\n n_unique = 0\n for i in range(100):\n output = self.run_command(\"totd\", exitcode=0).replace(\"\\n\", \"\")\n if output not in known:\n known.append(output)\n n_unique += 1\n self.assertGreater(n_unique, 3)", "def print_list(auth_list):\n print '*' * 15 + str(len(auth_list)) + ' passwords found!' 
+ '*' * 15\n for auth_info in auth_list:\n print 'Link : ' + auth_info['link']\n print 'User name : ' + auth_info['username']\n print 'Password : ' + auth_info['password']\n print '*' * 30", "def test_quiz_verify_list(self):\n with app.app_context():\n \n username = 'test_patient'\n #fetch the quiz\n rv = self.get_quiz(username)\n\n #load the response data\n resp = json.loads(rv.data)\n\n resp_quiz = resp['quiz']\n\n #check if the quiz is a list\n assert isinstance(resp_quiz,list)", "def selftest(self, timeout=60):\n with cpyrit.cpyrit.CPyrit() as cp:\n self.tell(\"Cores incorporated in the test:\")\n for i, core in enumerate(cp.cores):\n self.tell(\"#%i: '%s'\" % (i + 1, core))\n self.tell(\"\\nRunning selftest...\")\n workunits = []\n t = time.time()\n err = False\n while time.time() - t < timeout and not err:\n essid = random.choice(cpyrit.util.PMK_TESTVECTORS.keys())\n pws = []\n ref = cpyrit.util.PMK_TESTVECTORS[essid].keys()\n for i in xrange(random.randrange(10, 1000)):\n pws.append(random.choice(ref))\n workunits.append((essid, pws))\n cp.enqueue(essid, pws)\n while True:\n solvedPMKs = cp.dequeue(block=False)\n if solvedPMKs is not None:\n essid, pws = workunits.pop(0)\n for i, pw in enumerate(pws):\n ref = cpyrit.util.PMK_TESTVECTORS[essid][pw]\n if ref != solvedPMKs[i]:\n err = True\n break\n if err or not solvedPMKs:\n break\n if not err:\n for solvedPMKs in cp:\n essid, pws = workunits.pop(0)\n for i, pw in enumerate(pws):\n ref = cpyrit.util.PMK_TESTVECTORS[essid][pw]\n if ref != solvedPMKs[i]:\n err = True\n break\n if err or len(workunits) != 0 or len(cp) != 0:\n raise PyritRuntimeError(\"\\n!!! WARNING !!!\\nAt least some \" \\\n \"results seem to be invalid. This \" \\\n \"may be caused by a bug in Pyrit, \" \\\n \"faulty hardware or malicious \" \\\n \"network clients. Do not trust \" \\\n \"this installation...\\n\")\n else:\n self.tell(\"\\nAll results verified. Your installation seems OK\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the output of ``qpass exclude=... list``.
def test_cli_exclude(self):
    with TemporaryDirectory() as directory:
        touch(os.path.join(directory, "foo.gpg"))
        touch(os.path.join(directory, "foo/bar.gpg"))
        touch(os.path.join(directory, "Also with spaces.gpg"))
        returncode, output = run_cli(main, "--password-store=%s" % directory, "--exclude=*bar*", "--list")
        assert returncode == 0
        entries = output.splitlines()
        assert "foo" in entries
        assert "foo/bar" not in entries
        assert "Also with spaces" in entries
[ "def test_3_exclude():\n run_main_and_compare([\"scrapbook_test_data\", \"tmp/test-exclude.rdf\", \"--exclude\", \"1\", \"4\"],\n \"samples/standard_1_4_excluded.rdf\", \"tmp/test-exclude.rdf\")", "def test_exclude_filelist(self):\n self.ParseTest([(\"--exclude-filelist\", \"file\")],\n [(), ('1',), ('1', '1'), ('1', '1', '2'),\n ('1', '1', '3')],\n [\"testfiles/select/1/1/1\\n\"\n \"+ testfiles/select/1/1\\n\"\n \"testfiles/select/1\\n\"\n \"- **\"])", "def test_exclude_mix(self):\n\n self.assertTrue(fnmatch.fnmatch('name', '*', exclude='test', flags=fnmatch.N | fnmatch.A))\n self.assertTrue(fnmatch.fnmatch(b'name', b'*', exclude=b'test', flags=fnmatch.N | fnmatch.A))\n self.assertFalse(fnmatch.fnmatch('test', '*', exclude='test', flags=fnmatch.N | fnmatch.A))\n self.assertFalse(fnmatch.fnmatch(b'test', b'*', exclude=b'test', flags=fnmatch.N | fnmatch.A))\n\n self.assertTrue(fnmatch.fnmatch('name', ['*', '!name'], exclude='test', flags=fnmatch.N | fnmatch.A))\n self.assertFalse(fnmatch.fnmatch('test', ['*', '!name'], exclude='test', flags=fnmatch.N | fnmatch.A))\n self.assertTrue(fnmatch.fnmatch('!name', ['*', '!name'], exclude='test', flags=fnmatch.N | fnmatch.A))", "def test_explicit_multiple_exclude(self):\n sel = hostslist.HostSelection(self.mockroot)\n for host in self.mockhosts:\n sel.select(host[0], host[1])\n for host in self.mockhosts:\n sel.exclude(host[0], host[1])\n\n result = sel.flatten()\n assert len(result) == 0, \\\n \"The returned host differs in size from the expected\"", "def test_tub_exclude(tub):\n ri = lambda fnm: int(os.path.basename(fnm).split('_')[1].split('.')[0])\n\n before = tub.gather_records()\n # Make sure we gathered records correctly\n assert len(before) == tub.get_num_records()\n tub.exclude.add(1)\n after = tub.gather_records()\n # Make sure we excluded the correct number of records\n assert len(after) == (tub.get_num_records() - 1)\n before = set([ri(f) for f in before])\n after = set([ri(f) for f in after])\n diff = before - after\n assert len(diff) == 1\n # Make sure we exclude the correct index\n assert 1 in diff", "def test_tub_exclude(tub):\n ri = lambda fnm : int( os.path.basename(fnm).split('_')[1].split('.')[0] )\n\n before = tub.gather_records()\n assert len(before) == tub.get_num_records() # Make sure we gathered records correctly\n tub.exclude.add(1)\n after = tub.gather_records()\n assert len(after) == (tub.get_num_records() - 1) # Make sure we excluded the correct number of records\n before = set([ri(f) for f in before])\n after = set([ri(f) for f in after])\n diff = before - after\n assert len(diff) == 1\n assert 1 in diff # Make sure we exclude the correct index", "def test_filter_scan_skiplist():\n data = phony_scan_data()\n skiplist = [2, 4, 6]\n\n result = filter_scans(data, skiplist=skiplist)\n expected_result = [x for x in data if int(x[0]) not in skiplist]\n\n assert result == expected_result", "def test_excludes(self):\n\n self.assertFalse(isiterable([], exclude=(list,) + string_types))", "def test_explicit_single_exclude(self):\n sel = hostslist.HostSelection(self.mockroot)\n for host in self.mockhosts:\n sel.select(host[0], host[1])\n namespace = self.mockhosts[3][0]\n host = self.mockhosts[3][1]\n sel.exclude(namespace, host)\n result = sel.flatten()\n print len(self.mockhosts)\n print len(result), (len(self.mockhosts) - 1)\n assert len(result) == (len(self.mockhosts) - 1), \\\n \"The returned host differs in size from the expected\"\n print host, result\n assert host not in result, \"The excluded host was found in the 
result\"", "def test_translate_exclude_mix(self):\n\n results = fnmatch.translate(['*', '!test'], exclude=b'test', flags=fnmatch.N | fnmatch.A)\n self.assertTrue(len(results[0]) == 2 and len(results[1]) == 1)", "def test_exclude_after_scan(self):\n self.root = Path(\"testfiles/select2/3\")\n self.ParseTest([(\"--include\", \"testfiles/select2/3/**file.txt\"),\n (\"--exclude\", \"testfiles/select2/3/3sub2\"),\n (\"--include\", \"testfiles/select2/3/3sub1\"),\n (\"--exclude\", \"**\")],\n [(), ('3sub1',), ('3sub1', '3sub1sub1'), ('3sub1', '3sub1sub2'), ('3sub1', '3sub1sub3'),\n ('3sub3',), ('3sub3', '3sub3sub2'), ('3sub3', '3sub3sub2', '3sub3sub2_file.txt')])", "def test_filter_scan_seqlist_and_skiplist():\n data = phony_scan_data()\n seqlist = [1, 2, 3, 4, 5]\n skiplist = [2, 4, 6]\n\n result = filter_scans(data, seqlist=seqlist, skiplist=skiplist)\n expected_result = [x for x in data if int(x[0]) in set(seqlist) - set(skiplist)]\n\n assert result == expected_result", "def remove_excluded_players(self):\n for excluded_plyr_name in self._get_exclude_players_list():\n for i, p in enumerate(self.lineup):\n if p['name'] == excluded_plyr_name:\n self.logger.info(f\"Excluding {excluded_plyr_name} from lineup\")\n del self.lineup[i]\n break\n for i, p in enumerate(self.bench):\n if p['name'] == excluded_plyr_name:\n self.logger.info(f\"Excluding {excluded_plyr_name} from bench\")\n del self.bench[i]\n break", "def get_excluded_items():", "def test_list_excluded_projects(self, mock_stdout_write):\n next_action()\n self.assertEqual(\n [call(\"-+NewProject\"), call(\"\\n\")], mock_stdout_write.call_args_list)", "def test_mutualExclusionExcludesByKeyword(self):\n\n @_mutuallyExclusiveArguments([[\"a\", \"b\"]])\n def func(a=3, b=4):\n return a + b\n\n self.assertRaises(TypeError, func, a=3, b=4)", "def test_exclude_chars(self):\n pg = PasswordGenerator()\n pg.excludeuchars=\"A\"\n self.assertNotIn(\"A\",pg.generate())", "def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})", "def test_runWithWarningsSuppressedUnfiltered(self):\n filters = [((\"ignore\", \".*foo.*\"), {}),\n ((\"ignore\", \".*bar.*\"), {})]\n self.runWithWarningsSuppressed(filters, warnings.warn, \"don't ignore\")\n self.assertEqual(\n [\"don't ignore\"], [w['message'] for w in self.flushWarnings()])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
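The `--exclude=*bar*` option exercised above is a shell-style glob. The exact matching rules inside qpass are an assumption, but the idea can be shown with the stdlib's `fnmatch`:

```python
from fnmatch import fnmatch

# Glob-based exclusion as exercised by the test above: drop every entry
# that matches any exclude pattern.
def apply_excludes(entries, patterns):
    return [e for e in entries if not any(fnmatch(e, p) for p in patterns)]

entries = ["foo", "foo/bar", "Also with spaces"]
assert apply_excludes(entries, ["*bar*"]) == ["foo", "Also with spaces"]
```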
Test the command line usage message.
def test_cli_usage(self):
    for options in [], ["-h"], ["--help"]:
        returncode, output = run_cli(main, *options)
        assert "Usage:" in output
[ "def usage():\n if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':\n print(usageMsg)\n exit(0)", "def usage (usageString):\n command = os.path.basename (sys.argv[0])\n if (usageString):\n sys.stderr.write ('usage: %s %s\\n' % (command, usageString))\n else:\n sys.stderr.write ('Error in parsing command line arguments.\\n')\n return", "def usage(msg=None):\n if msg:\n print(\"%s\\n\" % msg)\n print(\"\\nUsage: %s\" % usageMsg())\n exit()", "def test_help_message(capsys):\n with pytest.raises(SystemExit):\n parse_command_line_args(test_override=['-h'])\n help_message = capsys.readouterr().out\n assert 'Prints the complement of the input DNA sequence' in help_message\n assert 'usage' in help_message\n assert 'help' in help_message\n assert 'prints the reverse complement' in help_message", "def test_missing_args():\n assert run([\"man\"]).stderr.startswith(\"man: Missing argument\\n\")", "def usage():\n nagios_return('UNKNOWN',\n \"usage: %s -H host -P port -u user -p pass [-n node]\" % sys.argv[0])", "def _UsageMessage():\n usage = textwrap.dedent(\"\"\"\\\n Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]\n\n Where options are:\n --verbose [-v]\n Increments the verbosity of the script by one level. This argument\n may be provided multiple times to enable additional output levels.\n\n --bazel_bin_path <path>\n Path at which Bazel-generated artifacts may be retrieved.\n \"\"\" % sys.argv[0])\n\n return usage", "def usage():\n\n\tsys.stderr.write(__doc__)\n\tsys.exit()", "def usage():\n\tprint(\"Usage: %s data_filename.txt\" % sys.argv[0])\n\tsys.exit(1)", "def test_was_help_used(self):\n\n with self.subTest('--help'):\n self.assertTrue(CommandlineParsingHelper.was_help_used(['--help']))\n\n with self.subTest('-h'):\n self.assertTrue(CommandlineParsingHelper.was_help_used(['-h']))\n\n with self.subTest('Not used - empty string'):\n self.assertFalse(CommandlineParsingHelper.was_help_used([]))\n\n with self.subTest('Not used - non empty string - :tasks --print'):\n self.assertFalse(CommandlineParsingHelper.was_help_used([':tasks', '--print']))", "def test_help() -> None:\n runner = click.testing.CliRunner()\n result = runner.invoke(sut.main, [\"--help\"])\n assert result.exit_code == 0 # nosec", "def print_usage(err_msg, usage):\n print(usage)\n print('Error: ' + err_msg)", "def _reportCommandLineUsageErrorAndExit(parser, message):\n print parser.get_usage()\n print message\n sys.exit(1)", "def test_help_negative():\n\n invalid_topic = 'invalid'\n runner = CliRunner()\n result = runner.invoke(help, [invalid_topic])\n assert 'Error' in result.output", "def test_cli_help():\n runner = CliRunner()\n help_result = runner.invoke(cli.main, [\"--help\"])\n assert help_result.exit_code == 0\n assert \"Show this message and exit.\" in help_result.output", "def test_cli_help():\n runner = CliRunner()\n help_result = runner.invoke(cli.main, ['--help'])\n assert help_result.exit_code == 0\n assert 'Show this message and exit.' 
in help_result.output", "def check_arguments():\n global nargs, progname\n nargs = len(sys.argv) - 1\n progname = os.path.basename(sys.argv[0])\n flag = True\n if nargs != 0 and N_ARGUMENTS[-1] == '*':\n flag = False\n else:\n for i in N_ARGUMENTS:\n if nargs == i:\n flag = False\n if flag:\n usage()", "def print_simple_usage():\n print 'usage: hn [open | view | update | list | help]'", "def usage(self):\n usage = '%%(prog)s %s [options] %s' % (self.name, self.args)\n\n if self.description:\n return '%s\\n\\n%s' % (usage, self.description)\n else:\n return usage" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the detection of whether the clipboard should be used.
def test_clipboard_enabled(self):
    # Make sure the clipboard is enabled by default on macOS.
    if platform.system().lower() == "darwin":
        assert is_clipboard_supported() is True
    else:
        # Make sure the clipboard is used when $DISPLAY is set.
        with PatchedItem(os.environ, "DISPLAY", ":0"):
            assert is_clipboard_supported() is True
        # Make sure the clipboard is not used when $DISPLAY isn't set.
        environment = os.environ.copy()
        environment.pop("DISPLAY", None)
        with PatchedAttribute(os, "environ", environment):
            assert is_clipboard_supported() is False
[ "def can_copy(self):\n return self._control.textCursor().hasSelection()", "def enablePaste(self) -> bool:\n ...", "def canPaste(self, availableFlavors: List[java.awt.datatransfer.DataFlavor]) -> bool:\n ...", "def can_paste(self):\n if self._control.textInteractionFlags() & QtCore.Qt.TextEditable:\n return bool(QtGui.QApplication.clipboard().text())\n return False", "def user32_IsClipboardFormatAvailable(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"format\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def use_host_clipboard(self):\n ret = self._get_attr(\"useHostClipboard\")\n return ret", "def canCopy(self) -> bool:\n ...", "def CanCopy(self):\n return self.GetSelectionStart() != self.GetSelectionEnd()", "def CanPaste(self):\n return stc.StyledTextCtrl.CanPaste(self) and self.CanEdit()", "def canCopySpecial(self) -> bool:\n ...", "def is_text_available(self) -> bool:\n return self.clipboard.wait_is_text_available()", "def CanCut(self):\n return self.CanCopy() and self.CanEdit()", "def clipboard_mode(self):\n ret = self._get_attr(\"clipboardMode\")\n return ClipboardMode(ret)", "def updatePasteAvail(self):\n mime = QtGui.QApplication.clipboard().mimeData()\n self.allActions['EditPaste'].setEnabled(len(mime.data('text/xml') or\n mime.data('text/plain'))\n > 0)\n focusWidget = QtGui.QApplication.focusWidget()\n if hasattr(focusWidget, 'pastePlain'):\n focusWidget.updateActions()", "def shouldCopy(self) -> \"SbBool\":\n return _coin.SoEngine_shouldCopy(self)", "def clipboard_copy(text: str) -> None:\n if pyperclip:\n pyperclip.copy(text)\n elif shutil.which(\"xclip\"):\n subprocess.run(\n [\"xclip\", \"-in\", \"-selection\", \"clipboard\"],\n input=text,\n text=True,\n check=True,\n )\n else:\n raise RuntimeError(\"No way to copy\")", "def _is_paste(keys):\n # Consider paste when it contains at least one newline and at least one\n # other character.\n text_count = 0\n newline_count = 0\n\n for k in keys:\n if isinstance(k.key, six.text_type):\n text_count += 1\n if k.key == Keys.ControlJ:\n newline_count += 1\n\n return newline_count >= 1 and text_count > 1", "def copySpecial(self, copyType: ghidra.app.util.ClipboardType, monitor: ghidra.util.task.TaskMonitor) -> java.awt.datatransfer.Transferable:\n ...", "def set_clipboard(clipboard):\n global copy, paste\n\n clipboard_types = {\n \"windows\": init_windows_clipboard,\n \"no\": init_no_clipboard,\n }\n\n if clipboard not in clipboard_types:\n raise ValueError('Argument must be one of %s' % (', '.join([repr(_) for _ in clipboard_types.keys()])))\n\n # Sets pyperclip's copy() and paste() functions:\n copy, paste = clipboard_types[clipboard]()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test editing of an entry on the command line.
def test_edit_entry(self):
    # Create a fake password store that we can test against.
    with TemporaryDirectory() as directory:
        touch(os.path.join(directory, "Personal", "Zabbix.gpg"))
        touch(os.path.join(directory, "Work", "Zabbix.gpg"))
        # Make sure we're not running the real `pass' program because its
        # intended purpose is user interaction, which has no place in an
        # automated test suite :-).
        with MockedProgram("pass"):
            returncode, output = run_cli(main, "--password-store=%s" % directory, "--edit", "p/z", merged=True)
            assert returncode == 0
            assert "Matched one entry: Personal/Zabbix" in output
[ "def test_command_edit(self):\n pass", "def test_admin_edit_approved_entry(self):\r\n self.client.logout()\r\n self.login_user(self.superuser)\r\n\r\n url, entry, data = self.edit_entry_helper()\r\n\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 200)\r\n\r\n response = self.client.post(url, data=data, follow=True)\r\n self.assertEqual(response.status_code, 200)\r\n self.assertContains(response,\r\n 'The entry has been updated successfully.')\r\n\r\n self.assertEqual(self.user, entry.user)", "def test_cet_line(self):\n test_data = \"Here is some text\"\n self.edit.set_edit_text(test_data)\n\n self.assertEqual(self.edit.get_line(0), test_data)", "def test_user_edit_approved_entry(self):\r\n url, entry, data = self.edit_entry_helper()\r\n\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 404)\r\n\r\n response = self.client.post(url, data=data)\r\n self.assertEqual(response.status_code, 404)", "def test_readonly_and_editable_edits(self):\n self.assertEqual(self.dlg.Edit2.get_value(), \"Editable\")\n self.assertTrue(self.dlg.Edit2.is_editable())\n self.assertEqual(self.dlg.Edit1.get_value(), \"ReadOnly\")\n self.assertFalse(self.dlg.Edit1.is_editable())", "def test_repo_edit(self):\n pass", "def test_select_entry_interactive(self):\n with TemporaryDirectory() as directory:\n touch(os.path.join(directory, \"foo.gpg\"))\n touch(os.path.join(directory, \"bar.gpg\"))\n touch(os.path.join(directory, \"baz.gpg\"))\n # Select entries using the command line filter 'a' and then use\n # interactive selection to narrow the choice down to 'baz' by\n # specifying the unique substring 'z'.\n program = PasswordStore(directory=directory)\n with CaptureOutput(input=\"z\"):\n entry = program.select_entry(\"a\")\n assert entry.name == \"baz\"", "def test_get_value(self):\n test_data = \"Some value\"\n self.edit.set_edit_text(test_data)\n\n self.assertEqual(self.edit.get_value(), test_data)", "def test_editables_flag(pip_test_package_script: PipTestEnvironment) -> None:\n result = pip_test_package_script.pip(\"list\", \"--editable\", \"--format=json\")\n result2 = pip_test_package_script.pip(\"list\", \"--editable\")\n assert {\"name\": \"simple\", \"version\": \"1.0\"} not in json.loads(result.stdout)\n assert os.path.join(\"src\", \"pip-test-package\") in result2.stdout", "def testCurrentBehaviour(self):\n self.assertContains('enwiki_help_editing', 'Editing')", "def test_repo_edit_git_hook(self):\n pass", "def test_project_edit_no_change(runner, project):\n (project.path / \"README.md\").write_text(\"Make repo dirty.\")\n\n commit_sha_before = project.repository.head.commit.hexsha\n\n result = runner.invoke(cli, [\"project\", \"edit\"], catch_exceptions=False)\n\n assert 0 == result.exit_code, format_result_exception(result)\n assert \"Nothing to update.\" in result.output\n\n commit_sha_after = project.repository.head.commit.hexsha\n assert commit_sha_after == commit_sha_before\n assert project.repository.is_dirty()", "def test_edit(self):\n\n p = Person.objects.first()\n p.first_name = 'John'\n p.save()\n logs = ModelChange.objects.filter(model='Person',\n instance_pk=p.pk)\n self.assertTrue(logs.exists())\n self.assertEqual(logs.last().type, 'edit')", "def ctxEditMode(*args, **kwargs):\n\n pass", "def test_edit_view(self):\n c = self.c\n response = c.get(reverse('wiki:edit', kwargs={'path': ''}))\n self.assertContains(response, 'Edit')", "def test_todo_can_be_edited(self):\n rv = self.client().post(\n '/todos/',\n data=self.todo)\n 
self.assertEqual(rv.status_code, 201)\n rv = self.client().put(\n '/todos/1',\n data={\n \"title\": \"Dont just eat, but also pray and love :-)\"\n })\n self.assertEqual(rv.status_code, 200)\n results = self.client().get('/todos/1')\n self.assertIn('Dont just eat', str(results.data))", "def editandcheck(self, phrase, expect=None):\n if expect is None:\n expect = phrase\n result = self.pc.set_edit_text(phrase, self.hashnum)\n self.assertIn(\"Plan changed successfully\", str(result))\n plan, server_hashnum = self.pc.get_edit_text()\n self.hashnum = server_hashnum # for later cleanup\n self.assertEqual(expect, plan)", "def edit(ctx, module_name, version, editor):\n module_tree = ctx.obj.check_module_tree()\n loader = ctx.obj.check_module(\n module_tree, module_name, version, log_error_and_wait_for_confirmation\n )\n call([editor, loader.moduledotfile_path()])", "def test_option_appears_on_edit(self):\n self.login(\"testuser\", \"password\")\n\n url = u\"/composers/translate/edit?appid=%s&srclang=all_ALL&editSelectedSourceButton=&targetlang=all_ALL&srcgroup=ALL&targetgroup=ALL\" % (self.firstApp)\n rv = self.flask_app.get(url)\n assert rv.status_code == 200\n data = rv.data.decode(\"utf8\") # This bypasses an apparent utf8 FlaskClient bug.\n assert \"Transfer Ownership\" in data\n assert \"/translate/transfer_ownership\" in data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the EmptyPasswordStoreError exception.
def test_empty_password_store_error(self):
    with TemporaryDirectory() as directory:
        program = PasswordStore(directory=directory)
        self.assertRaises(EmptyPasswordStoreError, program.smart_search)
[ "def test_missing_password_store_error(self):\n with TemporaryDirectory() as directory:\n missing = os.path.join(directory, \"missing\")\n program = PasswordStore(directory=missing)\n self.assertRaises(MissingPasswordStoreError, program.ensure_directory_exists)", "def test_no_matching_password_error(self):\n with TemporaryDirectory() as directory:\n touch(os.path.join(directory, \"Whatever.gpg\"))\n program = PasswordStore(directory=directory)\n self.assertRaises(NoMatchingPasswordError, program.smart_search, \"x\")", "def testLoginPasswordEmpty(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"\"))", "def test_invalid_password_signup(self):\n\n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", \"\", \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")\n \n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", None, \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")", "def test_validate_missing_password(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password_confirmation': \"password\"\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['password']", "def test_no_password_getter(user):\n with raises(AttributeError):\n user.password", "def test_register_missing_password(self):\n del self.reg_data['password']\n self.register(msg='password is required', code=422)", "def test_validate_on_invalid_value(self):\n args = (0, enums.SecretDataType.PASSWORD)\n self.assertRaises(TypeError, objects.SecretData, *args)", "def test_user_creation_with_no_password_returns_error(self):\n data ={\"email\":\"zac@gmail.com\",\"password\":\"\"}\n self.assertEqual(create_user(data),\"Please enter all details\")", "def test_incorrect_password_login(self):\n self.reg_data['password'] = 'wrongpas'\n self.login(code=401, msg='Invalid password: Enter right password to login')", "def test_create_superuser_fail_no_password(self):\n with self.assertRaises(Exception) as context:\n self.user.create_superuser(\n username='superuser_rocks', email='superuser@gmail.com', password='')\n self.assertRaises(\n TypeError, 'Superusers must have a password.' in str(context.exception))", "def test_password_not_provided_to_validate_function(self):\n data_no_password = {\n \"email\":\"user@gmail.com\",\n \"password\":None\n }\n with self.assertRaises(ValidationError) as e:\n LoginSerializer().validate(data_no_password)\n self.assertEqual(e.exception.args[0], 'A password is required to log in.')", "def test_no_password(self):\n\n response = self.client.post(REGISTRATION_URL,\n json.dumps({\"username\": \"Dirk\"}),\n content_type=\"application/json\")\n\n self.assertContains(response,\n CONTENT_MISSING_PASSWORD,\n status_code=HTTP_400_BAD_REQUEST)\n\n self.assertEqual(get_user_model().objects.count(), 0)", "def test_check_pw_failure(dbtransaction, auth_env):\n from .. 
security import check_password\n password = 'not secret'\n assert check_password(password) is False", "def test_validate_missing_password_confirmation(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password': 'password',\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['password_confirmation']", "def testLoginBadCredentialCombination(self):\n self.assertEquals(UserModel.ERR_BAD_CREDENTIALS, self.users.login(\"user2\", \"badpassword\"))", "def test_user_profile_invalid_token():\n clear()\n user = auth_register(\"test@test.com\", \"password\", \"firstName\", \"lastName\")\n # Logging out invalidates your token\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n user_profile(user['token'], user['u_id'])", "def test_empty_user_reset(self):\n data = {'email': ' '}\n self.reset_password(data=data, code=400,\n msg='Enter Valid Email')", "def test_password_exist(dbtransaction, auth_env):\n assert os.environ.get('AUTH_PASSWORD', 'muniri') is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test human friendly formatting of password store entries.
def test_format_text(self):
    entry = PasswordEntry(name="some/random/password", store=object())
    set_property(entry, "text", random_string())
    self.assertEquals(
        # We enable ANSI escape sequences but strip them before we
        # compare the generated string. This may seem rather pointless
        # but it ensures that the relevant code paths are covered :-).
        dedent(ansi_strip(entry.format_text(include_password=True, use_colors=True))),
        dedent(
            """
            some / random / password

            Password: {value}
            """,
            value=entry.text,
        ),
    )
[ "def test_get_password(self):\n random_password = random_string()\n entry = PasswordEntry(name=\"some/random/password\", store=object())\n set_property(entry, \"text\", \"\\n\".join([random_password, \"\", \"This is the description\"]))\n self.assertEquals(random_password, entry.password)", "def DwfPassword(self) -> str:", "def testLoginPassword128Long(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"abcdefghijklmnopqrstuvwxyz\n abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy\"))", "def test_default_style(self):\n self.assertEqual(FormatAuth([\"Test\"]), \\\n FormatAuth([\"Test\"], style=\"summary\"))", "def test_password_type(self):\n self.assertEqual(type(User.password), str)", "def verifyPlaintextPassword(password):", "def test_valid_passport_format():\n assert valid_passport_format(\"FWO9A-B8MDF-TGXW5-H49SO-HI5VE\") == True\n\n try:\n assert valid_passport_format(9083 - 9876 - 4659 - 3845 - 9345 - 3845)\n except TypeError:\n return True\n\n try:\n assert valid_passport_format(\"asdfadsf\")\n except AssertionError:\n return True", "def testFormat(self):\n meta = self.session.create_metabolome()\n\n self.util.stringTypeTest(self, meta, \"format\")\n\n self.util.stringPropertyTest(self, meta, \"format\")", "def test_repr(self):\n key = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n args = \"value={0}, data_type={1}\".format(\n binascii.hexlify(self.bytes_a), enums.SecretDataType.PASSWORD)\n expected = \"SecretData({0})\".format(args)\n observed = repr(key)\n self.assertEqual(expected, observed)", "def test_derive_device_authentication_password(self):\n assert derive_device_authentication_password(\"trustme\") == bytes.fromhex(\n \"e1 58 e4 01 20 47 bd 6c c4 1a af bc 5c 04 c1 fc\"\n )", "def testLoginUsername128Long(self):\n self.assertEquals(UserModel.ERR_BAD_USERNAME, self.users.login(\"abcdefghijklmnopqrstuvwxyz\n abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy\",\n \"password\"))", "def no_space(password):\r\n for character in password:\r\n if (ord(character) == 32):\r\n return False\r\n return True", "def checkWalletPasswordFormat(func):\n def wrapper(*args,**kwargs):\n password=args[1].data.get(\"password\");\n if re.match(wallet_password_pattern,password):\n return func(*args,**kwargs);\n raise WalletPasswordVlidationError\n return wrapper;", "def test_func_pformat_printable_unicode():\n value = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST'\n 'UVWXYZ!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ \\t\\n\\r\\x0b\\x0c')\n\n assert pformat(value, height=1, width=60) == (\n '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX')\n\n assert pformat(value, height=8, width=60) == (\n r'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!'\n r\"\"\"\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\")", "def pass_validation(password, action):\n if action == 'signup':\n if (not re.match(r'\\S*(\\S*([a-zA-Z]\\S*[0-9])|([0-9]\\S*[a-zA-Z]))\\S*', password) or\n len(password) < 8):\n return \"Password should be alphanumeric with at least 8 characters\"", "def test_get_help_text_matches_django(self):\n assert (\n PwnedPasswordsValidator().get_help_text()\n == CommonPasswordValidator().get_help_text()\n )", "def test_check_password(self) -> None:\n self.assertTrue(check_pwd('2AbhhE'))\n self.assertTrue(check_pwd('0hj454@6hBH'))\n self.assertTrue(check_pwd('1ja!2AB'))\n # less than 4\n self.assertFalse(check_pwd('4XY'))\n 
self.assertFalse(check_pwd('aa'))\n self.assertFalse(check_pwd(''))", "def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password yields the same hash\n self.assertTrue(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))", "def test_password_can_contain_utf8_chars(superuser):\n password = '▨☺♪∈∀∃' * 40 # test a really long password, just to make sure.\n user = User(email='foo@bar.com', full_name='Foo Bar', password=password)\n user.save_as(superuser)\n assert user.check_password(password)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting a password from an entry.
def test_get_password(self):
    random_password = random_string()
    entry = PasswordEntry(name="some/random/password", store=object())
    set_property(entry, "text", "\n".join([random_password, "", "This is the description"]))
    self.assertEquals(random_password, entry.password)
[ "def test_password_field(self):\n\n rv = self.client.get('/register')\n assert 'Password' in rv.data", "def test_ask_question__password(self, _):\n input_value = self.user_manager.ask_question('field', password=True)\n\n self.assertEqual(input_value, 'password')", "def test_check_pw_success(dbtransaction, auth_env):\n from .. security import check_password\n password = 'muniri'\n assert check_password(password)", "def test_check_pw_failure(dbtransaction, auth_env):\n from .. security import check_password\n password = 'not secret'\n assert check_password(password) is False", "def test_verify_password(db: Session, democontent: None) -> None:\n user = User.by_username(\"one\", db)\n assert user.verify_password(\"secret\") # type: ignore\n assert not user.verify_password(\"invalid\") # type: ignore", "def test_password_verifier_works(password):\n (input, result) = password\n print '\\n'\n print 'Inputs->' , input\n print 'Request->', result\n assert check_password(input) == result", "def verifyPlaintextPassword(password):", "def test_password_check(tmp_path):\n db = BlogPost(build_db_path(tmp_path))\n\n assert db.sign_up_entry('Khandokar', 'password')\n\n assert db.password_check('Khandokar', 'password')\n\n assert db.password_check('Khandokar', 'wrong_password') is False\n\n assert db.password_check('non_existent_user', 'password') is False", "def test_check_password(self) -> None:\n self.assertTrue(check_pwd('2AbhhE'))\n self.assertTrue(check_pwd('0hj454@6hBH'))\n self.assertTrue(check_pwd('1ja!2AB'))\n # less than 4\n self.assertFalse(check_pwd('4XY'))\n self.assertFalse(check_pwd('aa'))\n self.assertFalse(check_pwd(''))", "def test_view_pw(self):\n rsc = resources.get_by_name(\"host1.example.com\")\n self.open_url('/resource/view/{0}'.format(rsc.id))\n \n user0 = rsc.passwords.filter_by(username='user0').one()\n \n el = self.wd.find_element(By.ID, \"pw{0}\".format(user0.id))\n self.assertFalse(el.is_displayed())\n \n link = self.wd.find_element(By.ID, \"lnk{0}\".format(user0.id))\n \n \n link.click()\n \n def is_displayed(el):\n if el.is_displayed():\n return el\n \n found_el = WebDriverWait(self.wd, 10).until(lambda d: is_displayed(d.find_element(By.ID, \"pw{0}\".format(user0.id))))\n \n self.assertEqual(user0.password_decrypted, el.get_attribute(\"value\"))", "def validate_password(self, value):\n validate_password(value)\n return value", "def test_no_password_getter(user):\n with raises(AttributeError):\n user.password", "def test_derive_user_password(self):\n assert derive_user_password(\"secret\") == bytes.fromhex(\n \"03 fc ed b6 66 60 25 1e c8 1a 1a 71 69 01 69 6a\"\n )", "def copypasswordTest(self):\n self.newAccount.saveAccount()\n Credential.copyPwd('moriinga03')\n self.assertEqual(self.newAccount.l_password, pyperclip.paste())", "def _get_password(site, login):\n try:\n return keyring.get_password(site, login)\n except:\n print(\"It appears the keyring module doesn't support your platform.\")\n return None", "def check_password(self, password):\n assert self.password and password\n return self.password == util.get_hash(password)", "def test_derive_device_authentication_password(self):\n assert derive_device_authentication_password(\"trustme\") == bytes.fromhex(\n \"e1 58 e4 01 20 47 bd 6c c4 1a af bc 5c 04 c1 fc\"\n )", "def test_vmware_service_resources_vm_password_get(self):\n pass", "def getpass(prompt='Password: '):\n\t\n\tpassword = console.secure_input(prompt)\n\treturn password" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the MissingPasswordStoreError exception.
def test_missing_password_store_error(self):
    with TemporaryDirectory() as directory:
        missing = os.path.join(directory, "missing")
        program = PasswordStore(directory=missing)
        self.assertRaises(MissingPasswordStoreError, program.ensure_directory_exists)
[ "def test_empty_password_store_error(self):\n with TemporaryDirectory() as directory:\n program = PasswordStore(directory=directory)\n self.assertRaises(EmptyPasswordStoreError, program.smart_search)", "def test_no_matching_password_error(self):\n with TemporaryDirectory() as directory:\n touch(os.path.join(directory, \"Whatever.gpg\"))\n program = PasswordStore(directory=directory)\n self.assertRaises(NoMatchingPasswordError, program.smart_search, \"x\")", "def test_incorrect_password_login(self):\n self.reg_data['password'] = 'wrongpas'\n self.login(code=401, msg='Invalid password: Enter right password to login')", "def test_no_password_getter(user):\n with raises(AttributeError):\n user.password", "def test_validate_missing_password(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password_confirmation': \"password\"\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['password']", "def test_check_pw_failure(dbtransaction, auth_env):\n from .. security import check_password\n password = 'not secret'\n assert check_password(password) is False", "def test_register_missing_password(self):\n del self.reg_data['password']\n self.register(msg='password is required', code=422)", "def test_incorrectPassword(self):\n response = base64.encodestring('%s:%s' % (\n self.username, 'incorrectPassword'))\n\n d = self.credentialFactory.decode(response, _trivial_GET)\n return d.addCallback(\n lambda creds: self.failIf(creds.checkPassword(self.password)))", "def test_validate_on_invalid_value(self):\n args = (0, enums.SecretDataType.PASSWORD)\n self.assertRaises(TypeError, objects.SecretData, *args)", "def test_invalid_password_signup(self):\n\n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", \"\", \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")\n \n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", None, \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")", "def testLoginBadCredentialCombination(self):\n self.assertEquals(UserModel.ERR_BAD_CREDENTIALS, self.users.login(\"user2\", \"badpassword\"))", "def test_password_exist(dbtransaction, auth_env):\n assert os.environ.get('AUTH_PASSWORD', 'muniri') is not None", "def test_secret_not_found(testkeychain):\n\n with pytest.raises(LookupError) as excinfo:\n assert testkeychain.get(service=\"testsvc\", username=\"testuser\")\n\n assert str(excinfo.value) == (\n \"No secret found for 'testsvc' service and 'testuser' username \"\n \"in 'system' keychain.\"\n )", "def test_user_authenticate_password_invalid(self):\n\n self.assertFalse(User.authenticate(self.user.username, \"asdfdasfwer\"))", "def test_password_not_provided_to_validate_function(self):\n data_no_password = {\n \"email\":\"user@gmail.com\",\n \"password\":None\n }\n with self.assertRaises(ValidationError) as e:\n LoginSerializer().validate(data_no_password)\n self.assertEqual(e.exception.args[0], 'A password is required to log in.')", "def test_validate_missing_password_confirmation(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password': 'password',\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['password_confirmation']", "def test_password_file_not_existent(self, runner):\n result = runner.invoke(\n main.run, CLI_ARGS.format(pw_option=f\"--password-file /does/not/exist\").split(\" \")\n )\n assert result.exit_code == 2\n assert (\n \"File '/does/not/exist' does not exist.\" in result.output\n or \"File \\\"/does/not/exist\\\" does not exist.\" in 
result.output\n )", "def test_get_rabbitmq_password_failure():\n with pytest.raises(KeyError):\n config = CORTXS3Config(use_cipher = False)\n del config._config['rabbitmq']['password']\n assert config.get_rabbitmq_password()", "def testLoginPasswordEmpty(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the NoMatchingPasswordError exception.
def test_no_matching_password_error(self):
    with TemporaryDirectory() as directory:
        touch(os.path.join(directory, "Whatever.gpg"))
        program = PasswordStore(directory=directory)
        self.assertRaises(NoMatchingPasswordError, program.smart_search, "x")
[ "def test_incorrect_password_login(self):\n self.reg_data['password'] = 'wrongpas'\n self.login(code=401, msg='Invalid password: Enter right password to login')", "def test_incorrectPassword(self):\n response = base64.encodestring('%s:%s' % (\n self.username, 'incorrectPassword'))\n\n d = self.credentialFactory.decode(response, _trivial_GET)\n return d.addCallback(\n lambda creds: self.failIf(creds.checkPassword(self.password)))", "def testLoginBadCredentialCombination(self):\n self.assertEquals(UserModel.ERR_BAD_CREDENTIALS, self.users.login(\"user2\", \"badpassword\"))", "def test_check_pw_failure(dbtransaction, auth_env):\n from .. security import check_password\n password = 'not secret'\n assert check_password(password) is False", "def test_user_authenticate_password_invalid(self):\n\n self.assertFalse(User.authenticate(self.user.username, \"asdfdasfwer\"))", "def test_incorrect_password(self):\n if _debug: TestDeviceCommunicationControl._debug(\"test_incorrect_password\")\n\n # create a network\n anet = ApplicationNetwork()\n\n # add the service capability to the IUT\n anet.iut.add_capability(WhoIsIAmServices)\n anet.iut.add_capability(DeviceCommunicationControlServices)\n\n # set the password\n anet.iut_device_object._dcc_password = \"xyzzy\"\n\n # test sequence\n anet.td.start_state.doc(\"7-6-0\") \\\n .send(DeviceCommunicationControlRequest(\n destination=anet.iut.address,\n timeDuration=1,\n enableDisable='disable',\n password=\"plugh\",\n )).doc(\"7-6-1\") \\\n .receive(Error,\n errorClass='security',\n errorCode='passwordFailure',\n ).doc(\"7-6-2\") \\\n .success()\n\n # no IUT application layer matching\n anet.iut.start_state.success()\n\n # run the group\n anet.run()", "def testLoginPasswordEmpty(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"\"))", "def test_not_compromised(self):\n suffix = self.sample_password_suffix.replace(\"A\", \"3\")\n validator = PwnedPasswordsValidator(\n api_client=api.PwnedPasswords(client=self.http_client(suffix=suffix))\n )\n validator.validate(self.sample_password)", "def test_no_password_getter(user):\n with raises(AttributeError):\n user.password", "def test_validate_on_invalid_value(self):\n args = (0, enums.SecretDataType.PASSWORD)\n self.assertRaises(TypeError, objects.SecretData, *args)", "def test_check_password(self) -> None:\n self.assertTrue(check_pwd('2AbhhE'))\n self.assertTrue(check_pwd('0hj454@6hBH'))\n self.assertTrue(check_pwd('1ja!2AB'))\n # less than 4\n self.assertFalse(check_pwd('4XY'))\n self.assertFalse(check_pwd('aa'))\n self.assertFalse(check_pwd(''))", "def test_register_missing_password(self):\n del self.reg_data['password']\n self.register(msg='password is required', code=422)", "def test_incorrect_initial_password(self):\n self.passwords['old_password'] = 'wrongpas'\n self.change_password(code=401, msg='Enter Valid Password: Old password is wrong')", "def test_validate_missing_password(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password_confirmation': \"password\"\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['password']", "def test_compromised(self):\n validator = PwnedPasswordsValidator(\n api_client=api.PwnedPasswords(client=self.http_client())\n )\n with self.assertRaisesMessage(\n ValidationError, str(validator.error_message[\"singular\"])\n ):\n validator.validate(self.sample_password)", "def test_password_not_match(self, schema):\n\n data = {\n 'email': 'test@email.com',\n 'password': 'password',\n 'password_confirmation': 
\"sdasfs\"\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['_schema']", "def test_invalid_password_signup(self):\n\n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", \"\", \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")\n \n with self.assertRaises(ValueError) as context:\n User.signup(\"email@email.com\", None, \"testtest\", \"Jane\", \"Test\", \"I am Jane Test.\")", "def test_passwords_must_match_registration(self):\n errorMsg = 'The two passwords must match.'\n rv = self.register('mister_test', 'mister_test@example.com',\n 'password1', 'password2')\n assert errorMsg in rv.data", "def test_user_authenticate_password_invalid(self):\n\n user = User.authenticate(self.user2.username, 'password1')\n self.assertFalse(user)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test interactive password selection.
def test_select_entry_interactive(self):
    with TemporaryDirectory() as directory:
        touch(os.path.join(directory, "foo.gpg"))
        touch(os.path.join(directory, "bar.gpg"))
        touch(os.path.join(directory, "baz.gpg"))
        # Select entries using the command line filter 'a' and then use
        # interactive selection to narrow the choice down to 'baz' by
        # specifying the unique substring 'z'.
        program = PasswordStore(directory=directory)
        with CaptureOutput(input="z"):
            entry = program.select_entry("a")
            assert entry.name == "baz"
[ "def test_ask_question__password(self, _):\n input_value = self.user_manager.ask_question('field', password=True)\n\n self.assertEqual(input_value, 'password')", "def test_password_prompt(self, fake_getpass, fake_stderr):\n cli_args = ['--clusters', 'myCluster', '--location', '/foo', '--username', 'pat']\n\n iiqtools_cluster_backup.parse_args(cli_args)\n\n fake_getpass.assert_called()", "def test_set_password_mode(self):\n self.server_widget.password_mode = 'silent'\n assert self.client_widget.password_mode == self.server_widget.password_mode", "def test_view_pw(self):\n rsc = resources.get_by_name(\"host1.example.com\")\n self.open_url('/resource/view/{0}'.format(rsc.id))\n \n user0 = rsc.passwords.filter_by(username='user0').one()\n \n el = self.wd.find_element(By.ID, \"pw{0}\".format(user0.id))\n self.assertFalse(el.is_displayed())\n \n link = self.wd.find_element(By.ID, \"lnk{0}\".format(user0.id))\n \n \n link.click()\n \n def is_displayed(el):\n if el.is_displayed():\n return el\n \n found_el = WebDriverWait(self.wd, 10).until(lambda d: is_displayed(d.find_element(By.ID, \"pw{0}\".format(user0.id))))\n \n self.assertEqual(user0.password_decrypted, el.get_attribute(\"value\"))", "def test_prompting(self):\n pass", "def _page_password(self):\n return self._open(self.app.page_password)", "def practice(aString):\n\tattempt = input(\"Password: \")\n\twhile True:\n\t\tprint(\"\\t{}\".format(attempt == aString))\n\t\tif \"exit\" == attempt:\n\t\t\tbreak\n\t\tattempt = input(\"Password: \")", "def getpass(prompt='Password: '):\n\t\n\tpassword = console.secure_input(prompt)\n\treturn password", "def password(title,height,width,text):\n command=\"dialog --clear --title \\\"\" + title + \"\\\" --password \\\"\" + \\\n\t text + \"\\\" \" + `height` + \" \" + `width` + \\\n\t \" 2>&1 > /dev/tty\"\n diag=os.popen(command)\n ans=diag.read()\n r=diag.close()\n if r:\n\treturn 0\n else:\n\treturn ans", "def input_password(self, selector: str, password: str):\n with self.playwright.grpc_channel() as stub:\n try:\n # Should prevent logging in case of failure keywords\n previous_level = BuiltIn().set_log_level(\"NONE\")\n stub.InputText(\n playwright_pb2.inputTextRequest(input=password, selector=selector)\n )\n finally:\n BuiltIn().set_log_level(previous_level)", "def test_select(self):\n self.edit.set_edit_text(\"Some text\")\n\n self.edit.select(0, 0)\n self.assertEqual((0, 0), self.edit.selection_indices())\n\n self.edit.select()\n self.assertEqual((0, 9), self.edit.selection_indices())\n\n self.edit.select(1, 7)\n self.assertEqual((1, 7), self.edit.selection_indices())\n\n self.edit.select(5, 2)\n self.assertEqual((2, 5), self.edit.selection_indices())\n\n self.edit.select(\"me t\")\n self.assertEqual((2, 6), self.edit.selection_indices())\n\n self.assertRaises(RuntimeError, self.edit.select, \"123\")", "def copypasswordTest(self):\n self.newAccount.saveAccount()\n Credential.copyPwd('moriinga03')\n self.assertEqual(self.newAccount.l_password, pyperclip.paste())", "def runLogin(scr, hardMode, username, password):\n curses.use_default_colors()\n scr.erase()\n scr.move(0, 0)\n\n curses.noecho()\n scr.scrollok(True)\n\n slowWrite(scr, HEADER_TEXT + '\\n\\n')\n\n if hardMode:\n # use must enter the correct text to proceed\n entry = ''\n while entry.upper() != ENTRY.upper() + username.upper():\n slowWrite(scr, '> ')\n entry = upperInput(scr)\n else:\n # input is entered for them\n slowWrite(scr, '> ')\n curses.napms(INPUT_PAUSE)\n slowWrite(scr, ENTRY + username.upper() + '\\n', TYPE_DELAY)\n\n 
slowWrite(scr, '\\n' + PASSWORD_PROMPT + '\\n\\n')\n\n if hardMode:\n # use must enter the correct text to proceed\n entry = ''\n while entry.upper() != password.upper():\n if entry:\n slowWrite(scr, PASSWORD_ERROR + '\\n\\n')\n \n slowWrite(scr, '> ')\n entry = upperInput(scr, True)\n else:\n # input is entered for them\n slowWrite(scr, '> ')\n curses.napms(INPUT_PAUSE)\n password_stars = HIDDEN_MASK * len(password)\n slowWrite(scr, password_stars + '\\n', TYPE_DELAY)\n\n curses.napms(500)", "def askpass_main():\n\n verbose = os.getenv('PSSH_ASKPASS_VERBOSE')\n\n # It's not documented anywhere, as far as I can tell, but ssh may prompt\n # for a password or ask a yes/no question. The command-line argument\n # specifies what is needed.\n if len(sys.argv) > 1:\n prompt = sys.argv[1]\n if verbose:\n sys.stderr.write('pssh-askpass received prompt: \"%s\"\\n' % prompt)\n if not (prompt.strip().lower().endswith('password:') or 'enter passphrase for key' in prompt.strip().lower()):\n sys.stderr.write(prompt)\n sys.stderr.write('\\n')\n sys.exit(1)\n else:\n sys.stderr.write('Error: pssh-askpass called without a prompt.\\n')\n sys.exit(1)\n\n address = os.getenv('PSSH_ASKPASS_SOCKET')\n if not address:\n sys.stderr.write(textwrap.fill(\"pssh error: SSH requested a password.\"\n \" Please create SSH keys or use the -A option to provide a\"\n \" password.\"))\n sys.stderr.write('\\n')\n sys.exit(1)\n\n sock = socket.socket(socket.AF_UNIX)\n try:\n sock.connect(address)\n except socket.error:\n _, e, _ = sys.exc_info()\n message = e.args[1]\n sys.stderr.write(\"Couldn't bind to %s: %s.\\n\" % (address, message))\n sys.exit(2)\n\n try:\n password = sock.makefile().read()\n except socket.error:\n sys.stderr.write(\"Socket error.\\n\")\n sys.exit(3)\n\n print(password)", "def test_user(self, username, password, guess_db = 'Local Database'):\n result = True\n\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_GUEST_ACCESS, 1)\n\n self.s.select_option(self.info['loc_cfg_guestaccess_auth_server_option'], guess_db)\n self.s.click_and_wait(self.info['loc_cfg_guestaccess_guestpass_apply_button'])\n guestpass_url = self.s.get_text(self.info['loc_cfg_guestaccess_guestpass_url_span'])\n\n logging.info(\"Navigate to the guestpass url: '%s'\" % guestpass_url)\n self.s.open(guestpass_url)\n time.sleep(1)\n logging.info(\"Fill authentication username '%s' and password '%s'\" % (username, password))\n self.s.type_text(self.info['loc_guestpass_username_textbox'], username)\n self.s.type_text(self.info['loc_guestpass_password_textbox'], password)\n self.s.click_and_wait(self.info['loc_guestpass_login_button'], 5)\n\n if self.s.is_element_present(self.info['loc_guestpass_loginfailed_div'], 0.2) or\\\n not self.s.is_element_present(self.info['loc_guestinfo_next_button'], 0.2):\n result = False\n\n self.s.open(self.url)\n self.current_tab = self.LOGIN_PAGE\n\n return result", "def init_password(self):\n\t\tself.vm_password = raw_input(\"Please enter vm password \")", "def test_vmware_service_resources_vm_password_get(self):\n pass", "def toggle_password(self, show_password):\n textbox_type = self.get_attribute(\n 'type', WD_PF.SELENIUM.SHOW_PASSWORD_INPUT\n )\n\n if show_password and textbox_type == 'password':\n self.button(WD_PF.SELENIUM.SHOW_PASSWORD)", "def askPopPassword(self):\n import getpass\n prompt = 'Password for %s on %s?' % (self.popUser, self.popServer)\n return getpass.getpass(prompt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the element (x) to be pushed is greater than or equal to self.min, append x to the stack. If the element (x) to be pushed is smaller than self.min, then append 2x - self.min to the stack and update self.min = x.
def push(self, x):
    if not self.stack:
        self.stack.append(x)
        self.min = x
        return
    if x >= self.min:
        self.stack.append(x)
    else:
        self.stack.append((2*x) - self.min)
        self.min = x
    print("After pushing element {}: {}, min is {}".format(x, self.stack, self.min), end='\n\n')
[ "def pop(self):\n if not self.stack:\n return\n\n top = self.stack[-1]\n self.stack.pop()\n if top < self.min:\n self.min = (2*self.min) - top\n\n print(\"After popping element {}: {}, min is {}\".format(top, self.stack, self.min), end='\\n\\n')", "def stack_min(self):\n if not self.next_min[-1]: return self.stack[-1]\n return min(self.next_min[-1], self.stack[-1])", "def push(self, item):\n if item > self._max:\n self._max = item\n self._max_idx = len(self._stk)\n self._stk.append((self._max_idx, item))", "def pop(self) -> None:\n self.stack.pop()\n self.minStack.pop()\n if self.isEmpty():\n self.globalMinimum = float('inf')\n else:\n self.globalMinimum = self.minStack[-1]", "def push(self, x):\n \n self.stack.append(x)\n self.index += 1", "def main():\n minstack = MinStackOptimal()\n # minstack.push(0)\n minstack.push(5)\n minstack.push(1)\n minstack.push(6)\n minstack.push(0)\n minstack.push(3)\n\n print(minstack.get_min())\n minstack.pop()\n print(minstack.get_min())\n minstack.pop()\n print(minstack.get_min())\n print(\"Printing top elem: {}\".format(minstack.peek()))\n\n sample = [10, 5, 0, -1, 0, -1, 0]\n print(\"********* Using Optimal solution ***********************\")\n print(\"List of min values are: {}\".format(min_stack_optimal_helper(sample)))\n print(\"***********************************************************\")", "def populate_stack(S:TheStack, x:int):\n print(\"Adding\", x, \"element(s) to the stack.\")\n print(\"Added: \", end=\"\")\n for i in range(0, x):\n if i != x - 1:\n print(i, end=\",\")\n else:\n print(i, \"to the stack in that order.\")\n S.push(i)", "def min(self):\n\n if self.top is None:\n raise IndexError(\"min from empty stack\")\n else:\n return self.top.min", "def push(self,data):\n try:\n cur = 0\n while self.isFull(cur):\n cur += 1\n self.stack[cur].append(data)\n except IndexError:\n self.stack.append([])\n self.subStackIndex += 1\n self.stack[self.subStackIndex].append(data)", "def push_and_limit(limit, stack, data_type):\n for i in range(limit):\n stack.push(data_type(str(random.randint(10, 20))))\n print(stack.stack)\n try:\n stack.push(data_type(str(random.randint(10, 20))))\n print(\"- LimitExceedError doesn't work\")\n except LimitExceedError:\n print('+ LimitExceedError generated right!')\n if len(stack.stack) == limit:\n print('+ Push method works')\n print('+ Limit of stack items is ' + str(len(stack.stack) == limit))\n else:\n print('- Push method finished with mistakes')\n print('- Limit of stack items is ' + str(len(stack.stack) == limit))", "def push(stack, value):\n i = Item()\n i.below = stack.top\n i.value = value\n stack.top = i", "def remove_min(self): # 5\r\n if self.is_empty():\r\n raise Empty('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def test_stackBehavior(self):\r\n self.assertTrue(self.st.isEmpty())\r\n self.st.push(99)\r\n self.st.push(50)\r\n self.st.push(25)\r\n self.assertEqual(25,self.st.pop())\r\n self.assertEqual(50,self.st.pop())\r\n self.assertEqual(99,self.st.pop())\r\n self.assertTrue(self.st.isEmpty())", "def push_up(self, i: int) -> None:\n c = i # current index\n p = self.parent_index(i)\n\n if p != -1 and self.heap[c] < self.heap[p]:\n c = p\n\n if c != i:\n self.swap(c, i)\n self.push_up(c)", "def autoAppendStack(self,stack,el):\n if len(stack) > self.stackSize :\n stack.pop(0)\n stack.append(el)", "def 
push_if_lower(self, double value, int reference):\n if self.push_if_lower_fast(value, reference) == -1:\n raise ValueError(\"reference outside of range [0, max_reference]\")\n return self._pushed == 1", "def delMin(self):\n self.heapList[1] = self.heapList[self.currentSize]\n self.heapList.pop()\n self.currentSize -= 1\n self.percDown(1)", "def shift_stacks(self):\n if not self.stack2:\n while self.stack1:\n self.stack2.append(self.stack1.pop())", "def push(self, predictedProb, currProb, wordKey, currSent):\n # the predicted prob is made negative because heapq operates as a min heap so by making the\n # probability negative so we can make it behave like a max heap, we just need to multiply it\n # by negative one when we pop\n listEntry = (-predictedProb, currProb, wordKey, list(currSent))\n heapq.heappush(self.heap, listEntry)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the popped element (y) is greater than or equal to self.min, then do nothing. If the popped element (y) is smaller than self.min, then update self.min = 2*self.min - y.
def pop(self):
    if not self.stack:
        return

    top = self.stack[-1]
    self.stack.pop()
    if top < self.min:
        self.min = (2*self.min) - top

    print("After popping element {}: {}, min is {}".format(top, self.stack, self.min), end='\n\n')
[ "def push(self,x):\n if not self.stack:\n self.stack.append(x)\n self.min = x\n return\n if x >= self.min:\n self.stack.append(x)\n else:\n self.stack.append((2*x) - self.min)\n self.min = x\n print(\"After pushing element {}: {}, min is {}\".format(x, self.stack, self.min), end='\\n\\n')", "def pop(self) -> None:\n self.stack.pop()\n self.minStack.pop()\n if self.isEmpty():\n self.globalMinimum = float('inf')\n else:\n self.globalMinimum = self.minStack[-1]", "def delMin(self):\n self.heapList[1] = self.heapList[self.currentSize]\n self.heapList.pop()\n self.currentSize -= 1\n self.percDown(1)", "def get_min(self):\n self.min_pos, self.min_value = min(self.fitness_dict.items(), key=lambda x: x[1])\n\n # Add min value to collector\n self.min_value_list.append(self.min_value)\n\n # check whatever the list is empty if not append only new maximum threshold\n if not self.threshold_list:\n self.threshold_list['threshold'].append(self.min_value)\n self.threshold_list['time_step'].append(self.time_step)\n elif self.min_value > max(self.threshold_list['threshold']):\n self.threshold_list['threshold'].append(self.min_value)\n self.threshold_list['time_step'].append(self.time_step)", "def remove_min(self): # 5\r\n if self.is_empty():\r\n raise Empty('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def _update_min(self):\n if self.is_leaf():\n self.min = self.keys[1]\n else:\n cur = self\n while not cur.is_leaf():\n cur = cur.children[0]\n self.min = cur.keys[1]", "def _min_max_update(self, bound, cost):\n if cost < self.min_cost: self.min_cost = cost\n if cost > self.max_cost: self.max_cost = cost\n if bound < self.min_bound: self.min_bound = bound\n if bound > self.max_bound: self.max_bound = bound\n \n # Recall in self.best we have [best_bound, best_cost, worst_cost] \n if bound > self.best[0] and np.abs(bound-self.best[0]) > self.bound_tol:\n # If bound beyond threshold, keep it all\n self.best = np.array([bound, cost, cost])\n self.best_layout = deepcopy(self.layout)\n \n elif np.abs(bound-self.best[0]) < self.bound_tol:\n # If bound within threshold\n if cost < self.best[1]: \n self.best[0], self.best[1] = bound, cost \n self.best_layout = deepcopy(self.layout)\n elif cost > self.best[2]: \n self.best[2] = cost", "def skip_min_points(self):\n min = 100\n for key, value in self._do_list.items():\n if value.get_points(2) < min:\n min = value.get_points(2)\n task = (key, value)\n self._do_list.pop(task[0])\n self._skip_list.update({task[0]: task[1]})", "def _post_setattr_minimum(self, old, new):\n if new > self.maximum:\n self.maximum = new\n if new > self.high_value:\n self.high_value = new\n if new > self.low_value:\n self.low_value = new", "def _post_setattr_maximum(self, old, new):\n if new < self.minimum:\n self.minimum = new\n if new < self.low_value:\n self.low_value = new\n if new < self.high_value:\n self.high_value = new", "def stack_min(self):\n if not self.next_min[-1]: return self.stack[-1]\n return min(self.next_min[-1], self.stack[-1])", "def updateFromAdjacentSquare(self, square):\r\n if square.mined():\r\n self.numberOfAdjacentMines += 1", "def _calc_min(self):\n return np.min(self.get_points()) - 1", "def _post_setattr_minimum(self, old, new):\n if new > self.maximum:\n self.maximum = new\n if new > self.time:\n self.time = new", "def remove_min(self):\n p = self._find_min() # find the 
position\n item = self._data.delete(p) # delete the position and return the element\n return item._key, item._value", "def nudge(self):\n if self.x > self.targetX:\n self.x -= 1\n elif self.x < self.targetX:\n self.x += 1", "def removeMin(self):\r\n if self._heap:\r\n minElem = self.min()\r\n element = self._heap.pop()\r\n # get element at bottom of heap\r\n if len(self._heap) > 0:\r\n element._index = 0\r\n self._heap[0] = element\r\n # swap element at bottom of heap into top\r\n self.bubbleDown(element)\r\n return minElem\r\n else:\r\n return None", "def _post_setattr_maximum(self, old, new):\n if new < self.minimum:\n self.minimum = new\n if new < self.time:\n self.time = new", "def min(self):\n return self.x.min(), self.y.min()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
minstack = MinStackBruteForce()
minstack.push(0)
minstack.push(5)
minstack.push(1)
minstack.push(6)
minstack.push(0)
minstack.push(3)

print(minstack.get_min())
minstack.pop()
print(minstack.get_min())
minstack.pop()
print(minstack.get_min())

sample = [10, 5, 0, -1, 0, -1, 0]
print(" Using Bruteforce solution ")
def main():
    minstack = MinStackOptimal()
    # minstack.push(0)
    minstack.push(5)
    minstack.push(1)
    minstack.push(6)
    minstack.push(0)
    minstack.push(3)

    print(minstack.get_min())
    minstack.pop()
    print(minstack.get_min())
    minstack.pop()
    print(minstack.get_min())
    print("Printing top elem: {}".format(minstack.peek()))

    sample = [10, 5, 0, -1, 0, -1, 0]
    print("********* Using Optimal solution ***********************")
    print("List of min values are: {}".format(min_stack_optimal_helper(sample)))
    print("***********************************************************")
[ "def stack_min(self):\n if not self.next_min[-1]: return self.stack[-1]\n return min(self.next_min[-1], self.stack[-1])", "def pop(self):\n if not self.stack:\n return\n\n top = self.stack[-1]\n self.stack.pop()\n if top < self.min:\n self.min = (2*self.min) - top\n\n print(\"After popping element {}: {}, min is {}\".format(top, self.stack, self.min), end='\\n\\n')", "def pop(self) -> None:\n self.stack.pop()\n self.minStack.pop()\n if self.isEmpty():\n self.globalMinimum = float('inf')\n else:\n self.globalMinimum = self.minStack[-1]", "def push(self,x):\n if not self.stack:\n self.stack.append(x)\n self.min = x\n return\n if x >= self.min:\n self.stack.append(x)\n else:\n self.stack.append((2*x) - self.min)\n self.min = x\n print(\"After pushing element {}: {}, min is {}\".format(x, self.stack, self.min), end='\\n\\n')", "def test_static_stack(self):\n print(\"Testing the stack\")\n s = ArrayStack(4)\n self.assertEqual(None, s.pop())\n print(\"I expect an empty error above\")\n for i in range(3, 8, 2):\n s.push(i)\n self.assertEqual(7, s.pop())\n self.assertEqual(5, s.pop())\n self.assertEqual(3, s.pop())\n self.assertEqual(None, s.pop())\n print(\"I expect empty error above\")\n for i in range(7):\n s.push(i)\n print(\"I expect three full error above\")", "def test_stackBehavior(self):\r\n self.assertTrue(self.st.isEmpty())\r\n self.st.push(99)\r\n self.st.push(50)\r\n self.st.push(25)\r\n self.assertEqual(25,self.st.pop())\r\n self.assertEqual(50,self.st.pop())\r\n self.assertEqual(99,self.st.pop())\r\n self.assertTrue(self.st.isEmpty())", "def main():\r\n queue = createQueue()\r\n sum=0\r\n max=0\r\n numCards= int(input(\" Enter the number of cards used in the deck:\"))\r\n\r\n min=queue.size\r\n numGames= int ( input( \" Enter the number of games to play in the simulation\"))\r\n for i in range (numGames):\r\n vic = createStack()\r\n dis1 = createStack()\r\n dis2 = createStack()\r\n c = 1\r\n for j in range(numCards):\r\n enqueue(queue, c)\r\n c += 1\r\n for i in range(1, random.randint(1, queue.size)):\r\n shuffle(queue)\r\n getFirst(vic,dis1,queue.front.data)\r\n dequeue(queue)\r\n while emptyQueue(queue) == False:\r\n algo(queue,vic,dis1,dis2)\r\n count=fillVictory(vic,dis1,dis2)\r\n if count > max:\r\n max=count\r\n if count < min:\r\n min=count\r\n sum = sum +count\r\n average= float(sum/numGames)\r\n print (\"The average number of cards on the victory pile \", average)\r\n print (\"The maximum number of cards ever achieved on victory pile \", max)\r\n print (\"The minimum number of cards ever achieved on victory pile \", min)", "def do_steepest_ascent_hill_climbing(tweak_function = swap_function):\n #Initialization step\n current_fitness = None\n current = generate_random_permutation()\n iteration = 200 #number of iterations, you can change it\n number_of_tweaks = 10 #number of tweaks, you can change it\n \n while(iteration>=0):\n iteration -=1\n current_fitness = fitness_function(current) #calculating fitness\n #print('current',current, current_fitness)\n if current_fitness == 28:\n break\n #Modification step\n #generates next step and calculates fitness\n \n neighbour = generate_next_state(current,tweak_function)\n \n neighbour_fitness = fitness_function(neighbour)\n #print('neighbour',neighbour, neighbour_fitness)\n #Choosing new generation from candidates\n for i in range(1,number_of_tweaks):\n \n candidate_neighbour = generate_next_state(current,tweak_function) \n candidate_neighbour_fitness = fitness_function(neighbour)\n if neighbour_fitness < 
candidate_neighbour_fitness:\n #print(\"assigning\")\n neighbour = candidate_neighbour\n \n \n if current_fitness < neighbour_fitness:\n #print(\"assigning\")\n current = neighbour\n\n return current,current_fitness", "def coding_problem_01(stack):\n queue = deque([]) # stack S:[1,2,3,4,5], queue Q:[]\n for cnt in range(len(stack) - 1): # move stack into queue. S:[1], Q:[5,4,3,2]\n queue.append(stack.pop())\n for cnt in range(len(queue) // 2):\n stack.append(queue.popleft()) # S:[1,5], Q:[4,3,2]\n for cnt2 in range(len(queue) - 1): # rotate last element to front, S:[1,5], Q:[2,4,3]\n queue.append(queue.popleft())\n stack.append(queue.popleft()) # S:[1,5,2], Q:[4,3]\n if queue:\n stack.append(queue.popleft())\n return stack", "def min(self):\n\n if self.top is None:\n raise IndexError(\"min from empty stack\")\n else:\n return self.top.min", "def test_list_stacks(self):\n pass", "def min_cost(pipes):\n\n # Write your code here!\n heapq.heapify(pipes)\n total = 0\n\n while len(pipes) > 1:\n first = heapq.heappop(pipes)\n second = heapq.heappop(pipes)\n\n total += first + second\n heapq.heappush(pipes, first+second)\n\n return total", "def inspect_branch(stack, point, current_best):\n while len(stack) != 0:\n root = stack.pop()\n d = distance(point, root.location)\n current_best = (root.location, d) if d < current_best[\n 1] else current_best\n stack.append(root.right_child) if root.right_child else None\n stack.append(root.left_child) if root.left_child else None\n return current_best", "def prune(self):\n hypBest = self.getHypothesisByPosition(self.bestPosition)\n if hypBest.failed:\n log_error(\"[AllInOneCubePruner] failed to build 0,0,0 hyp !!!\")\n stack_output = []\n self.lastBestScore = 0\n else:\n stack_output = [hypBest]\n self.lastBestScore = hypBest.getScore()\n\n while len(stack_output) < self.size:\n # expand currently best lattice\n self.expandEdges()\n\n if self.needStop : break\n\n # pop the best hyp of edges to output\n hypBest = self.popBestHypothesis()\n self.lastBestScore = hypBest.getScore()\n stack_output.append( hypBest )\n\n stack_output.sort( key=lambda x:x.getScore() , reverse=True )\n return stack_output", "def min_temp(self):\n result = List(7)\n node = self._graphs_data_list.head().one_way[0].head().next\n while node is not None:\n result.add(node.item)\n node = node.next\n return result", "def brute(gen):\n\ttriangle = build_triangle(gen)\n\tsmallest = triangle[0][0]\n\tfor r, row in enumerate(triangle):\n\t\tfor e, n in enumerate(row):\n\t\t\tfor size in range(r-e+1):\n\t\t\t\tst = sub_triangle_sum(r,e,size,triangle)\n\t\t\t\tpoint = (r + 1) * r // 2 + e\n\t\t\t\t#stdout.write(\"%6d: (%5d, %5d, %5d): %d\\r\" %(point,r,e,size, st))\n\t\t\t\t#stdout.flush()\n\t\t\t\tsmallest = min(smallest, st)\n\treturn smallest", "def __init__(self, stack=[]):\n self.stack = stack", "def remove_from_stack(stack):\n stack.pop()\n return stack", "def bootstrapfstworker(queue, fstlist, datasetsize, numberofreps, value):\n\timport random\n\tnumberover = 0\n\tfor k in range(numberofreps):\n\t\tfstvalue = float(sum(random.choice(fstlist) for l in range(datasetsize)) / datasetsize)\n\t\tif fstvalue >= value:\n\t\t\tnumberover += 1\n\tqueue.put(numberover)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps the jenkins head configuration and substitutes the server alias with one of the server configurations provided by serverAliasesList. headConfiguration: the configuration of one jenkins head. serverAliasesList: a list with server configurations.
def __init__(self, headConfiguration, serverAliasesList: list):
    self.__checkHeadConfiguration(headConfiguration)
    self.__checkRequiredServerConfiguration(headConfiguration, serverAliasesList)
    self.__mHeadConfiguration = headConfiguration
    self.__mServerConfiguration = self.__removeUnnecessaryServerConfig(headConfiguration, serverAliasesList)
[ "def _expand_variables(self, config, hostname):\n\n if 'hostname' in config:\n config['hostname'] = config['hostname'].replace('%h', hostname)\n else:\n config['hostname'] = hostname\n\n if 'port' in config:\n port = config['port']\n else:\n port = SSH_PORT\n\n user = os.getenv('USER')\n if 'user' in config:\n remoteuser = config['user']\n else:\n remoteuser = user\n\n host = socket.gethostname().split('.')[0]\n fqdn = LazyFqdn(config, host)\n homedir = os.path.expanduser('~')\n replacements = {'controlpath':\n [\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%L', host),\n ('%n', hostname),\n ('%p', port),\n ('%r', remoteuser),\n ('%u', user)\n ],\n 'identityfile':\n [\n ('~', homedir),\n ('%d', homedir),\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%u', user),\n ('%r', remoteuser)\n ],\n 'proxycommand':\n [\n ('%h', config['hostname']),\n ('%p', port),\n ('%r', remoteuser)\n ]\n }\n\n for k in config:\n if k in replacements:\n for find, replace in replacements[k]:\n if isinstance(config[k], list):\n for item in range(len(config[k])):\n if find in config[k][item]:\n config[k][item] = config[k][item].\\\n replace(find, str(replace))\n else:\n if find in config[k]:\n config[k] = config[k].replace(find, str(replace))\n return config", "def test_override_of_subset_of_server_options(self):\n config_file = \"%s/config_server_overrides_partial.conf\" % self.test_data_path\n oconfig = ocelog.config.Config(config_file)\n self.assertEqual(oconfig.server.port, 5555) # override of \"8888\" \n self.assertEqual(oconfig.server.host, \"localhost\") # default", "def set_deploy_config():\n\n\n host = \"ec2-174-129-125-34.compute-1.amazonaws.com:8000\"\n\n # BROKEN until fix yoeman build - wtf\n # Set directory for distribution scripts: \n # Assumes parallelspider/spiderweb/dist/scripts/ \n #directory_path = path + \"spiderweb/dist/scripts/\"\n directory_path = path\n\n # Find renamed services file\n contents = os.listdir(directory_path)\n for f in contents:\n if \"service\" in f:\n # avoid backups\n if \"~\" not in f:\n service_file = f\n\n # Replace host and mock info in dist version of config service file\n file_path = directory_path + service_file\n for line in fileinput.input(file_path, inplace=1):\n new_host_line = line.replace(\"localhost:8000\", host)\n # not setting mock yet\n new_mock_line = new_host_line.replace(\"mock = true\", \"mock = false\")\n print \"%s\" % (new_mock_line),\n #print \"%s\" % (new_host_line),", "def test_override_for_all_the_server_options(self):\n config_file = \"%s/config_server_overrides_all.conf\" % self.test_data_path\n oconfig = ocelog.config.Config(config_file)\n self.assertEqual(oconfig.server.port, 7777) # override of \"8888\"\n self.assertEqual(oconfig.server.host, \"127.0.0.1\") # override of \"localhost\"", "def server_ln(alias):\n server_src = '/srv/{alias}/conf/{alias}.conf'.format(alias=alias)\n server_dest = '/etc/init/{alias}.conf'.format(alias=alias)\n\n nginx_src = '/srv/{alias}/conf/{alias}.nginx'.format(alias=alias)\n nginx_dest = '/etc/nginx/sites-enabled/{alias}'.format(alias=alias)\n\n # Remove any existing links at the destinations\n with settings(warn_only=True):\n sudo('rm {dest}'.format(dest=server_dest))\n sudo('rm {dest}'.format(dest=nginx_dest))\n\n # Remove the default site from nginx\n sudo('rm /etc/nginx/sites-enabled/default')\n\n # Create the symbolic links\n sudo('ln -s {src} {dest}'.format(src=server_src, dest=server_dest))\n sudo('ln -s {src} {dest}'.format(src=nginx_src, dest=nginx_dest))\n sudo('initctl 
reload-configuration')", "def _kong_apply_config(self, container):\n\n # This dictionary could be also used to initialize Kong environment vars, so\n # we list most commonly used options here as an example.\n # see https://docs.konghq.com/gateway-oss/2.5.x/configuration/#environment-variables\n context = {\n \"KONG_ADMIN_ACCESS_LOG\": \"/dev/stdout\",\n \"KONG_ADMIN_ERROR_LOG\": \"/dev/stderr\",\n \"KONG_ADMIN_GUI_ACCESS_LOG\": \"/dev/stdout\",\n \"KONG_ADMIN_GUI_ERROR_LOG\": \"/dev/stderr\",\n \"KONG_ADMIN_LISTEN\": \"{} http2 ssl\".format(KONG_ADMIN_API),\n \"KONG_CLUSTER_LISTEN\": \"off\",\n \"KONG_DATABASE\": \"off\",\n \"KONG_KIC\": \"on\",\n \"KONG_LUA_PACKAGE_PATH\": \"/opt/?.lua;/opt/?/init.lua;;\",\n \"KONG_NGINX_WORKER_PROCESSES\": \"2\",\n \"KONG_PLUGINS\": \"bundled\",\n \"KONG_PORTAL_API_ACCESS_LOG\": \"/dev/stdout\",\n \"KONG_PORTAL_API_ERROR_LOG\": \"/dev/stderr\",\n \"KONG_PORT_MAPS\": \"80:8000, 443:8443\",\n \"KONG_PREFIX\": \"/kong_prefix/\",\n \"KONG_PROXY_ACCESS_LOG\": \"/dev/stdout\",\n \"KONG_PROXY_ERROR_LOG\": \"/dev/stderr\",\n \"KONG_PROXY_LISTEN\": \"0.0.0.0:8000, 0.0.0.0:8443 http2 ssl\",\n \"KONG_STATUS_LISTEN\": \"0.0.0.0:8100\",\n \"KONG_STREAM_LISTEN\": \"off\",\n \"KONG_NGINX_DAEMON\": \"off\",\n \"KONG_MEM_CACHE_SIZE\": self.config[\"mem-cache-size\"].strip(),\n }\n\n self._kong_render_config_and_push(container, 'kong.conf.j2', KONG_CONFIG_PATH, context=context)", "def configure_nginx():\n current_role = env.effective_roles[0]\n demo_server_hostname = env.roledefs[current_role]['hostname']\n\n if exists('/etc/nginx/sites-enabled/default'):\n sudo('rm /etc/nginx/sites-enabled/default')\n context = {\n 'INSTANCE_PUBLIC_IP': env.host,\n 'DEMO_SERVER_HOSTNAME': demo_server_hostname,\n 'KOLIBRI_HOME': KOLIBRI_HOME,\n 'KOLIBRI_PORT': KOLIBRI_PORT,\n }\n if exists('/etc/nginx/sites-enabled/kolibri.conf'):\n sudo('rm /etc/nginx/sites-enabled/kolibri.conf')\n upload_template(os.path.join(CONFIG_DIR,'nginx_site.template.conf'),\n '/etc/nginx/sites-available/kolibri.conf',\n context=context, use_jinja=True, use_sudo=True, backup=False)\n sudo('chown root:root /etc/nginx/sites-available/kolibri.conf')\n sudo('ln -s /etc/nginx/sites-available/kolibri.conf /etc/nginx/sites-enabled/kolibri.conf')\n sudo('chown root:root /etc/nginx/sites-enabled/kolibri.conf')\n sudo('service nginx reload')\n puts(green('NGINX site kolibri.conf configured.'))", "def getConfigurationProxy(self):", "def lookup(self, hostname):\n matches = [\n config for config in self._config\n if self._allowed(config['host'], hostname)\n ]\n\n ret = {}\n for match in matches:\n for key, value in match['config'].items():\n if key not in ret:\n # Create a copy of the original value,\n # else it will reference the original list\n # in self._config and update that value too\n # when the extend() is being called.\n ret[key] = value[:]\n elif key == 'identityfile':\n ret[key].extend(value)\n ret = self._expand_variables(ret, hostname)\n return ret", "def configure(self, server, monitoring):\n # Providers\n for provider in self.providers:\n server.providers_add(provider)\n \n # Services\n for service in self.services:\n monitoring.add(service)", "def hxconfig(self, cmd):\n \n if self.backend is not 'hxhal' or self.controller is None:\n cmd.fail('text=\"No hxhal controller\"')\n return\n\n cmdKeys = cmd.cmd.keywords\n configName = cmdKeys['configName'].values[0]\n \n sam = self.sam\n\n try:\n configGroup, configName = configName.split('.')\n except:\n configGroup = 'h4rgConfig' if self.actor.instrument == 'PFS' 
else 'h2rgConfig'\n \n sam.updateHxRgConfigParameters(configGroup, configName)\n cmd.finish()", "def include_master_host_port(self):\n return \"\"\"--include-master-host-port\"\"\"", "def tljh_custom_jupyterhub_config(c):\n logger.info('Add JupyterHub to Jupyterlab environment')\n c.Spawner.cmd = ['jupyter-labhub']", "def merge_server_definitions(self, definitions):\n endpoint = self.build_url(\"/definitions\")\n return self.request('post', endpoint, data=definitions)", "def _AddSearchServers(dbroot,\n search_def_list,\n search_tab_id,\n supplemental_search_label,\n supplemental_search_url,\n log):\n if not search_def_list:\n search_server = dbroot.end_snippet.search_config.search_server.add()\n search_server.name.value = \"\"\n search_server.url.value = \"about:blank\"\n search_server.html_transform_url.value = \"about:blank\"\n search_server.kml_transform_url.value = \"about:blank\"\n search_server.suggest_server.value = \"about:blank\"\n return\n\n log.debug(\"_AddSearchServers()...\")\n for search_def in search_def_list:\n log.debug(\"Configure search server: %s\", search_def.label)\n search_server = dbroot.end_snippet.search_config.search_server.add()\n search_server.name.value = (\n \"%s [%s]\" % (\n search_def.label,\n search_tab_id) if search_tab_id else search_def.label)\n search_server.url.value = search_def.service_url\n\n # Building query string and appending to search server URL.\n add_query = search_def.additional_query_param\n add_config = search_def.additional_config_param\n query_string = None\n if add_query:\n query_string = (\"%s&%s\" % (\n query_string, add_query) if query_string else add_query)\n\n if add_config:\n query_string = (\"%s&%s\" % (\n query_string, add_config) if query_string else add_config)\n\n if query_string:\n search_server.url.value = \"%s?%s\" % (search_server.url.value,\n query_string)\n\n if search_def.fields and search_def.fields[0].suggestion:\n suggestion = search_server.suggestion.add()\n suggestion.value = search_def.fields[0].suggestion\n\n # Write 'html_transform_url' value to dbroot file.\n search_server.html_transform_url.value = search_def.html_transform_url\n\n # Write 'kml_transform_url' value to dbroot file.\n search_server.kml_transform_url.value = search_def.kml_transform_url\n\n # Write 'suggest_server' value to dbroot file.\n search_server.suggest_server.value = search_def.suggest_server\n\n # Write 'result_type' to dbroot file.\n if search_def.result_type == \"XML\":\n search_server.type = ResultType.RESULT_TYPE_XML\n\n # Set supplemental UI properties.\n if supplemental_search_label:\n search_server.supplemental_ui.url.value = supplemental_search_url\n search_server.supplemental_ui.label.value = supplemental_search_label\n\n log.debug(\"_AddSearchServers() done.\")", "def _configure(self, *args, **kwargs):\n config_map = {\n \"chatport\" : self.config,\n \"protocol\" : self.config,\n \"invis\" : self.config,\n \"masterserver\" : self.__requester.config,\n \"basicserver\" : self.__requester.config,\n \"honver\" : self.__requester.config\n }\n \n for kwarg in kwargs:\n if kwarg in config_map:\n config_map[kwarg][kwarg] = kwargs[kwarg]", "def configure_host(host, config_script_path):\n print_step('Checking host %s configuration' % host.name)\n if host.status == 'configured':\n print_step('Host %s is already configured' % host.name)\n if host.account != 'heimdall':\n host.account = 'heimdall'\n else:\n if host.account != 'root' and host.account != 'heimdall':\n print_step('Starting configuration with %s, hope this account has sudo 
rights...' % host.account)\n h = '%s@%s' % (host.account, host.ip)\n print_step('Host %s is not configured' % host.name)\n print_step('Checking if was manually installed..')\n # Looking for /opt/heimdall/imconfigured, maybe server was manually configured\n\n if not execute(cmd.check_is_configured, host.account, host=h).get(h):\n print_step('Host %s was manually configured\\n' % host.name)\n host.status = 'configured'\n host.account = 'heimdall'\n return host\n # Deploying configure script to tmp\n try:\n # Check if /opt/\n if host.account == 'root':\n remote_path = '/root/'\n else:\n remote_path = '/home/%s/' % host.account\n if not execute(cmd.deploy_config_script, config_script_path, remote_path, host.account, host=h).get(\n h):\n config_script = config_script_path.split('/')[-1]\n if not execute(cmd.execute_config_script, remote_path + config_script, host.account, host=h).get(\n h):\n print_step('Host %s is configured\\n' % host.name)\n host.status = 'configured'\n host.account = 'heimdall'\n execute(cmd.remove_remote_script, remote_path + config_script, host.account, host=h).get(h)\n return host\n else:\n raise FabricCommandError(\"Error during configuration script execution.\",\n cmd.execute_config_script.__name__)\n else:\n raise FabricCommandError(\"Error during configuration script deployment.\",\n cmd.deploy_config_script.__name__)\n except FabricCommandError as ffe:\n print ffe\n exit(ffe.code)\n return host", "def build_config(raw_config):\n\n config = {\"jobs\": {}, \"slack\": [], \"help\": {}, \"fabfiles\": {}, \"python_files\": {}}\n\n for namespace in raw_config:\n if \"_\" in namespace: # pragma: no cover\n raise RuntimeError(\"Namespaces must not contain underscores\")\n\n helps = []\n\n for job_type, job_config in raw_config[namespace][\"jobs\"].items():\n job_config[\"report_stdout\"] = job_config.get(\"report_stdout\", False)\n job_config[\"report_format\"] = job_config.get(\"report_format\", \"text\")\n job_config[\"report_success\"] = job_config.get(\"report_success\", True)\n job_config[\"python_function\"] = job_config.get(\"python_function\")\n namespaced_job_type = f\"{namespace}_{job_type}\"\n validate_job_config(namespaced_job_type, job_config)\n config[\"jobs\"][namespaced_job_type] = job_config\n\n for slack_config in raw_config[namespace][\"slack\"]:\n command = f\"{namespace} {slack_config['command']}\"\n slack_config[\"command\"] = command\n slack_config[\"job_type\"] = f\"{namespace}_{slack_config['job_type']}\"\n slack_config[\"regex\"] = build_regex_from_command(command)\n slack_config[\"template_params\"] = get_template_params(command)\n slack_config[\"delay_seconds\"] = slack_config.get(\"delay_seconds\", 0)\n\n validate_slack_config(slack_config)\n config[\"slack\"].append(slack_config)\n\n helps.append([command, slack_config[\"help\"]])\n\n config[\"help\"][namespace] = sorted(helps)\n\n if \"fabfile\" in raw_config[namespace]:\n config[\"fabfiles\"][namespace] = raw_config[namespace][\"fabfile\"]\n\n if \"python_file\" in raw_config[namespace]:\n config[\"python_files\"][namespace] = raw_config[namespace][\"python_file\"]\n\n config[\"slack\"] = sorted(config[\"slack\"], key=itemgetter(\"command\"))\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"job_type\"] not in config[\"jobs\"]:\n msg = f\"Slack command {slack_config['command']} references unknown job type {slack_config['job_type']}\"\n raise RuntimeError(msg)\n\n return config", "def server_add_and_update_opts(*args, **kwargs):\n\n def port_range_callback(ctx, param, value):\n if 
not value:\n return None\n\n value = value.lower().strip()\n if value == \"unspecified\":\n return None, None\n if value == \"unrestricted\":\n return 1024, 65535\n\n try:\n lower, upper = map(int, value.split(\"-\"))\n except ValueError: # too many/few values from split or non-integer(s)\n raise click.BadParameter(\n \"must specify as 'unspecified', \"\n \"'unrestricted', or as range separated \"\n \"by a hyphen (e.g. '50000-51000')\"\n )\n if not 1024 <= lower <= 65535 or not 1024 <= upper <= 65535:\n raise click.BadParameter(\"must be within the 1024-65535 range\")\n\n return (lower, upper) if lower <= upper else (upper, lower)\n\n def inner_decorator(f, add=False):\n if add:\n f = click.argument(\"HOSTNAME\")(f)\n else:\n f = click.option(\"--hostname\", help=\"Server Hostname.\")(f)\n\n default_scheme = \"gsiftp\" if add else None\n f = click.option(\n \"--scheme\",\n help=\"Scheme for the Server.\",\n type=click.Choice((\"gsiftp\", \"ftp\"), case_sensitive=False),\n default=default_scheme,\n show_default=add,\n )(f)\n\n default_port = 2811 if add else None\n f = click.option(\n \"--port\",\n help=\"Port for Globus control channel connections.\",\n type=int,\n default=default_port,\n show_default=add,\n )(f)\n\n f = click.option(\n \"--subject\",\n help=(\n \"Subject of the X509 Certificate of the server. When \"\n \"unspecified, the CN must match the server hostname.\"\n ),\n )(f)\n\n for adjective, our_preposition, their_preposition in [\n (\"incoming\", \"to\", \"from\"),\n (\"outgoing\", \"from\", \"to\"),\n ]:\n f = click.option(\n f\"--{adjective}-data-ports\",\n callback=port_range_callback,\n help=\"Indicate to firewall administrators at other sites how to \"\n \"allow {} traffic {} this server {} their own. Specify as \"\n \"either 'unspecified', 'unrestricted', or as range of \"\n \"ports separated by a hyphen (e.g. '50000-51000') within \"\n \"the 1024-65535 range.\".format(\n adjective, our_preposition, their_preposition\n ),\n )(f)\n\n return f\n\n return detect_and_decorate(inner_decorator, args, kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that handles authenticated users (restrict access to the 'sign up', 'login', etc.).
def restrict_authenticated_users(view_func):
    @wraps(view_func)
    def wrapper_func(view, *args, **kwargs):
        if view.request.user.is_authenticated:
            return redirect(reverse('posts:all'))
        else:
            return view_func(view, *args, **kwargs)
    return wrapper_func
[ "def _decorator(request, *args, **kwargs):\n is_authenticated = request.user.is_authenticated\n authenticated = is_authenticated if isinstance(is_authenticated, bool)\\\n else is_authenticated()\n if authenticated:\n return func(request, *args, **kwargs)\n if 'HTTP_AUTHORIZATION' in request.META.keys():\n authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\n if authmeth.lower() == 'basic':\n auth = auth.strip().decode('base64')\n identifier, password = auth.split(':', 1)\n username = get_username(identifier)\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return func(request, *args, **kwargs)\n raise Http404", "def anonimous_required(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(resolve_url(settings.LOGIN_REDIRECT_URL))\n else:\n return view_func(request, *args, **kwargs)\n return _wrapped_view", "def login_required(view):\n \n @wraps(view)\n def inner_decorator(request,*args, **kwargs):\n \n out = createBaseResponseObject()\n \n try:\n if request.user.is_authenticated():\n return view(request, *args, **kwargs)\n \n except Exception, e:\n out['status'] = 0\n out['errors'] = [str(e)]\n return HttpResponse(json.dumps(out))\n \n out['status'] = 0\n out['errors'] = ['You must be logged in to use this feature']\n return HttpResponse(json.dumps(out))\n\n return inner_decorator", "def user_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n handler = args[0]\n if handler.user:\n return fn(*args, **kwargs)\n handler.redirect(u'/login')\n return wrapper", "def oauth_protected(f):\r\n @wraps(f)\r\n def decorated_view(request, *args, **kwargs):\r\n allowed, user = _check_request(request)\r\n if allowed:\r\n return f(request, user, *args, **kwargs)\r\n else:\r\n raise HttpResponseForbidden()\r\n return decorated_view", "def elevated_required(unauthorized):\n def decorator_wrapper(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_user.is_authenticated() or not current_user.is_elevated():\n return unauthorized()\n return func(*args, **kwargs)\n return decorated_view\n return decorator_wrapper", "def login_required(func):\n @wraps(func)\n def decorator():\n if not 'user' in session:\n return redirect(url_for('login'))\n return func()\n return decorator", "def requires_authorization(f):\n @wraps(f)\n def endpoint(*args, **kwargs):\n if not g.auth_user:\n raise Unauthorized('Not authenticated.')\n return f(*args, **kwargs)\n return endpoint", "def requires_logged_in(func):\n def ret_fn(*args):\n \"\"\"\n wrapper function\n :param args: argument for decorated function\n :return: return decorated function return values\n \"\"\"\n self = args[0]\n if not self.is_logged_in:\n self.try_login(raise_if_fail=True)\n return func(*args)\n return ret_fn", "def user_access_decorator(\n redirect_func, redirect_url_func, deny_func=None, redirect_field=REDIRECT_FIELD_NAME\n):\n\n def decorator(view_fn):\n def _wrapped_view(request, *args, **kwargs):\n redirect = redirect_func(request.user)\n if redirect and not request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\":\n # We must call reverse at the view level, else the threadlocal\n # locale prefixing doesn't take effect.\n redirect_url = redirect_url_func() or reverse(\"users.login\")\n\n # Redirect back here afterwards?\n if redirect_field:\n path = quote(request.get_full_path())\n redirect_url = \"%s?%s=%s\" % (redirect_url, 
redirect_field, path)\n\n return HttpResponseRedirect(redirect_url)\n elif (redirect and (request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\")) or (\n deny_func and deny_func(request.user)\n ):\n return HttpResponseForbidden()\n\n return view_fn(request, *args, **kwargs)\n\n return wraps(view_fn)(_wrapped_view)\n\n return decorator", "def admin_required(unauthorized):\n def decorator_wrapper(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_user.is_authenticated() or not current_user.is_admin():\n return unauthorized()\n return func(*args, **kwargs)\n return decorated_view\n return decorator_wrapper", "def authenticated(func):\n\tnewfunc = tornado.web.authenticated(func)\n\tnewfunc.original = func\n\treturn newfunc", "def login_required(view):\n\n @wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login', _external=True))\n return view(**kwargs)\n return wrapped_view", "def authenticated(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user:\n self.write({'status_code':404, 'error_msg':'not login'})\n return\n return method(self, *args, **kwargs)\n return wrapper", "def agave_jwt_login(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n \"\"\"Decorated function.\"\"\"\n login_user_agave_jwt(request)\n return func(request, *args, **kwargs)\n\n return decorated_function", "def check_superuser(func):\n\n def wrapper(self, *args, **kwargs):\n if self.user.is_superuser:\n return True\n return func(self, *args, **kwargs)\n\n return wrapper", "def requires_login(func):\n @wraps(func)\n def requires_login_inner(self):\n self.enforce_login()\n return func(self)\n return requires_login_inner", "def logged_in(view):\n @functools.wraps(view)\n def decorated_view(*args, **kwargs):\n user_id = session.get('user', -1)\n logged_in_at = session.get('logged_in_at', None)\n user = User.query.get(user_id)\n\n # does check for database logout of user\n if user and user.logged_out_at > logged_in_at:\n session.clear()\n user = None\n\n return view(user=user, *args, **kwargs)\n return decorated_view", "def authenticated(fn):\n def wrapper(*args, **kwargs):\n if args[0]['valid']:\n return fn(*args, **kwargs)\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the reward/regret for action a for the simple bandit. Use self.q_star (see reset)
def bandit_step(self, a):
    # The two missing lines are filled in below under the standard k-armed-testbed
    # assumption (numpy imported as np): the reward is q_star[a] plus unit-variance
    # Gaussian noise, and the regret is the gap to the best arm's mean.
    reward = self.q_star[a] + np.random.randn()
    regret = np.max(self.q_star) - self.q_star[a]
    return reward, regret
[ "def reward(self, state, action):\n return self.reward_matrix[state][action]", "def reward(self, s = None):\n s = self.state(as_tuple=True) if s == None else s\n s = s if isinstance(s, tuple) else self.legal_states[s]\n return self.rewards[s]", "def reward(self):\n return self._r_sum", "def __getitem__(self, state_action):\n state, action = state_action\n if state in self._abs_set: \n support, probs = [[state], [1.0]]\n else:\n support, probs = super().__getitem__(state_action)\n \n reward = self._reward[state, action]\n return support, probs, reward", "def reward(self, r, newS):\n alpha = self._alpha\n gamma = self._gamma\n q = self._q\n\n newA = self._chooseAction(newS)\n\n qIndex = self._qIndex(self._lastState, self._lastAction)\n nextQIndex = self._qIndex(newS, newA)\n\n #The Q-VAlue update step from SARSA\n q[qIndex] += alpha * (r + gamma*q[nextQIndex] - q[qIndex])\n\n self._lastState = newS\n self._lastAction = newA\n return self._lastAction", "def reward(self, action: str):\n\n # reward agent alot for moving to and staying on square and punish for\n # not going next or on square\n if self.agent == self.square[0] and action == \"stay\":\n return 200\n elif self.agent == self.square[0] and action == \"left\":\n return -100\n elif self.agent == self.square[0] and action == \"right\":\n return -100\n elif self.agent == self.square[0] - 1 and action == \"right\":\n return 100\n elif self.agent == self.square[0] + 1 and action == \"left\":\n return 100\n else:\n return -50", "def get_reward(self, state, action, new_state, failed):\n raise NotImplementedError", "def R(self,state):\n return self.reward[state]", "def compute_reward(self, obs, action):\n pass", "def reward_func(self, state, action, Time_matrix):\n if (action[0] == 0 and action[1] == 0):\n reward = -C\n else:\n reward = R * (Time_matrix[action[0]-1][action[1]-1][state[1]][state[2]]) - C * ((Time_matrix[action[0]-1][action[1]-1][state[1]][state[2]]) + (Time_matrix[state[0]-1][action[0]-1][state[1]][state[2]]))\n return reward", "def reward(self):\n\n w_temp = self.config['temperature_w_in_reward']\n w_light = self.config['light_w_in_reward']\n w_cost = self.config['cost_w_in_reward']\n\n cost = self._calculate_cost_and_update_energy_source()\n temp, light = (self.inside_sensors['first']['temperature'],\n self.inside_sensors['first']['light'])\n req = self.user_requests\n\n temp_penalty = abs(temp - req['temp_desired'])\n\n light_penalty = abs(light - req['light_desired'])\n\n reward = (cost * w_cost) \\\n + (temp_penalty * w_temp) \\\n + (light_penalty * w_light)\n\n return -(reward + self.action_penalty)", "def action_reward(self, action_index):\n self.T += 1 # Increment time\n succ = self.action_list[action_index]() # Perform action\n if succ: # Check for successful action\n reward = self._score() # If successful, get score\n dr = reward - self.last_reward # Get the derivative\n self.last_reward = reward # Update last reward\n else: # If not successful\n reward = self.last_reward # No need to recalculate\n dr = self.FAILURE_COST # difference is 0\n \n # Set best score\n if reward > self.best_sc:\n print(\"Best Score: {0}\".format(reward))\n print(\"Time: {0}\".format(self.T))\n self.best_sc = reward\n self._display('Score{0}'.format(abs(reward)))\n \n # Update user on time_step \n if self.T % 100 == 0:\n print(\"Time: {0}\".format(self.T))\n print(\"Reward: {0}, Dr: {1}\".format(reward,dr))\n self._display('World')\n \n # Return score difference\n return dr", "def reward(self):\n\n return self.r_time()", "def 
get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:\n raise NotImplementedError", "def get_reward(self, time, time_delta, core, debug=False):\n cycles = self.stats['time'][core].delta * sim.dvfs.get_frequency(core) / 1e9 # convert fs to cycles\n num_instructions = self.stats['coreinstrs'][core].delta\n ipc = num_instructions / (cycles or 1)\n\n current_power = self.energy_stats.power[('core', core)].d + self.energy_stats.power[('core', core)].s\n current_energy = self.energy_stats.energy[('core', core, 'energy-static')]\n current_energy += self.energy_stats.energy[('core', core, 'energy-dynamic')]\n print \"current_power reward{}: \".format(core), current_power\n print \"curr_ipc reward{}: \".format(core), ipc\n print \"CURRENT ENERGY IN REWARD FUNCTION.{}:\".format(core), current_energy\n reward = ipc - self.PF * abs(current_power - self.core_budgets[core])\n return reward", "def bestActionFor(mdp,state,Q):\r\n\r\n\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r", "def action_from_q(self, state: Tuple[int, int, bool]) -> int:\n\n probabilities = [self.epsilon / 2, self.epsilon / 2]\n\n greedy_choice = int(np.argmax(self.Q[state]))\n probabilities[greedy_choice] = 1 - self.epsilon + self.epsilon / 2\n action = np.random.choice(np.arange(2), p=probabilities)\n return action", "def get_reward(self):\n\n # Premise is sound, as we want to reward highest when sim.pose x,y,z is \n # essentially equal target_pos x,y,z (making the product of discount rate\n # and pose diff essentially 0 -- therefore, reward would be close to 1).\n #reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos).sum())\n \n # rrm - discounting the error\n #reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos).sum())\n reward = 2.-.2*(abs(self.sim.pose[:3] - self.target_pos).sum())\n \n # By experience in running, this reward gets negative quickly. We need to\n # scale it, so it can hopefully learn more efficiently.\n # Let's see what happens when we just cap the negative reward at -1\n \"\"\"\n if reward > 1.0:\n print(\"Reward is > 1: {0}\".format(reward))\n reward = 1.0\n elif reward < -1.0:\n print(\"Reward is < 1: {0}\".format(reward))\n reward = -1.0\n \"\"\"\n\n # Works pretty well... 
Trying something different below\n \"\"\"\n if reward > 0 and reward < 0.5:\n reward = reward * 2\n elif reward > 0.5:\n reward = reward * 4\n elif reward < -1.0:\n #print(\"Reward is < 1: {0}\".format(reward))\n reward = -1.0\n \"\"\"\n\n # Works well, but what if we provide extra reward (or penalize more) based on z coordinate (for hovering)\n \"\"\"\n absoluteZDiff = abs(self.sim.pose[2] - self.target_pos[2])\n if reward > 0 and reward < 0.5 and absoluteZDiff < 1:\n reward = reward * 3\n elif reward >= 0.5 and reward < 0.8 and absoluteZDiff < 1:\n reward = reward * 4\n elif reward >= 0.8 and absoluteZDiff < 1:\n reward = reward * 5\n elif reward > -1.0 and absoluteZDiff > 2:\n reward = -3.0 # penalize more for bad z\n else:\n reward = -1.0 # Cap it here\n \"\"\"\n \n # Instead of comparing to target z, compare to last z\n origTargetZDiff = abs(self.reward_last_z - self.target_pos[2])\n self.reward_last_z = self.reward_this_z\n self.reward_this_z = self.sim.pose[2]\n \n # diff between current z and last z\n lastZDiff = abs(self.reward_last_z - self.reward_this_z)\n # diff betwen current z and target z\n targetZDiff = abs(self.reward_this_z - self.target_pos[2])\n \n \"\"\"\n if lastZDiff < 0.1:\n if reward > 0 and reward < 0.5:\n reward = 0.5\n elif reward >= 0.5 and reward < 0.8:\n reward = 0.8\n elif reward >= 0.8 and reward < 1:\n reward = 1.0\n elif reward < -1.0:\n reward = -1.0 # Cap it here\n\n if reward > 0 and targetZDiff < 2:\n reward = reward * 1.2\n\n if (targetZDiff < origTargetZDiff):\n if reward > 0:\n reward = reward * 1.5\n else:\n reward = reward * 0.5\n \"\"\"\n \n if reward < -1.0:\n reward = -1.0\n \n return reward", "def _reward(self, player, state, actions):\n raise(NotImplementedError)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts arff to pandas dataframe.
def arff2df(data):
    arff = liacarff.loads(str(data))
    attr = [a[0] for a in arff['attributes']]
    return pd.DataFrame(data=arff['data'], columns=attr)
[ "def convert_to_pandas_df(self):\n\n self.fsample = pd.DataFrame(self.fsample)\n self.fevent = pd.DataFrame(self.fevent)\n self.rec = pd.DataFrame(self.rec)", "def df2arff(df):\n from loaders_savers import load_csv # Imported here because of circular dependencies\n path = 'tmp_tmp432tmp123_tm_p_blabla3da.csv' # Stupid name to \"ensure\" we do not override something\n df.to_csv(path, index=False)\n try:\n data = load_csv(path)\n finally:\n remove(path)\n return data", "def into_pandas(self) -> pd.DataFrame:\r\n result = pd.DataFrame(\r\n {\r\n \"Marque\": self.__marques,\r\n \"CarType\": self.__carTypes,\r\n \"FuelType\": self.__fuels,\r\n \"Gearbox\": self.__gearboxes,\r\n \"ManufacturingDate\": self.__years,\r\n \"Engine_l\": self.__engines,\r\n \"Power_kW\": self.__powers,\r\n \"Mileage_km\": self.__mileages,\r\n \"Price_euro\": self.__prices,\r\n }\r\n )\r\n self.__result = result\r\n return result", "def to_pandas(self):\r\n\t\treturn pd.DataFrame(self._tfidf).T", "def table_to_dataframe(fc, workspace = None, index = None):\n\n if workspace:\n arcpy.env.workspace = workspace\n \n fields = [field.name for field in arcpy.ListFields(fc)]\n types = {field.name: field.type for field in arcpy.ListFields(fc)}\n #print(types)\n \n if index:\n index = [row[0] for row in arcpy.da.SearchCursor(fc,index)]\n \n data = [list(row) for row in arcpy.da.SearchCursor(fc,fields + ['SHAPE@JSON'])]\n df = pd.DataFrame(data, index = index, columns = fields + ['SHAPE@JSON'])\n \n for field in fields:\n df[field].astype(map_tbl_to_df[types[field]])\n #print(df['SHAPE@JSON'])\n return df", "def to_data_frame(self, num_records: int = 0) -> PandasDataFrame:", "def feature_df(self):\n import pandas as pd\n return pd.DataFrame(self.feature_records)", "def pandas2arff(df,filename,wekaname = \"pandasdata\",cleanstringdata=False,cleannan=True):\n\n import re\n\n def cleanstring(s):\n if s!=\"?\":\n return re.sub('[^A-Za-z0-9]+', \"_\", str(s))\n else:\n return \"?\"\n\n dfcopy = df #all cleaning operations get done on this copy\n\n\n if cleannan:\n dfcopy = dfcopy.fillna(-999999999) #this is so that we can swap this out for \"?\"\n #this makes sure that certain numerical columns with missing values don't get stuck with \"object\" type\n\n f = open(filename,\"w\")\n arffList = []\n arffList.append(\"@RELATION \" + wekaname + \"\\n\")\n #look at each column's dtype. If it's an \"object\", make it \"nominal\" under Weka for now (can be changed in source for dates.. 
etc)\n for i in range(df.shape[1]):\n if dfcopy.dtypes[i]=='O' or (df.columns[i] in [\"Class\",\"CLASS\",\"class\"]):\n if cleannan:\n dfcopy.iloc[:,i] = dfcopy.iloc[:,i].replace(to_replace=-999999999, value=\"?\")\n if cleanstringdata:\n dfcopy.iloc[:,i] = dfcopy.iloc[:,i].apply(cleanstring)\n _uniqueNominalVals = [str(_i) for _i in np.unique(dfcopy.iloc[:,i])]\n _uniqueNominalVals = \",\".join(_uniqueNominalVals)\n _uniqueNominalVals = _uniqueNominalVals.replace(\"[\",\"\")\n _uniqueNominalVals = _uniqueNominalVals.replace(\"]\",\"\")\n _uniqueValuesString = \" {\" + _uniqueNominalVals +\"}\"\n arffList.append(\"@ATTRIBUTE \" + df.columns[i] + _uniqueValuesString + \"\\n\")\n else:\n arffList.append(\"@ATTRIBUTE \" + df.columns[i] + \" real\\n\")\n #even if it is an integer, let's just deal with it as a real number for now\n arffList.append(\"@DATA\\n\")\n for i in range(dfcopy.shape[0]):#instances\n _instanceString = \"\"\n for j in range(df.shape[1]):#features\n if dfcopy.dtypes[j]=='O':\n _instanceString+=\"\\\"\" + str(dfcopy.iloc[i,j]) + \"\\\"\"\n else:\n _instanceString+=str(dfcopy.iloc[i,j])\n if j!=dfcopy.shape[1]-1:#if it's not the last feature, add a comma\n _instanceString+=\",\"\n _instanceString+=\"\\n\"\n if cleannan:\n _instanceString = _instanceString.replace(\"-999999999.0\",\"?\") #for numeric missing values\n _instanceString = _instanceString.replace(\"\\\"?\\\"\",\"?\") #for categorical missing values\n arffList.append(_instanceString)\n f.writelines(arffList)\n f.close()\n del dfcopy\n return True", "def fits_to_df(fname):\n\n d = fits.open(\"FoF\\\\processing\\\\datasets\\\\\" + fname)\n print(d.info())\n col_num = int(input(\"Choose the table to import: \"))\n t = Table(d[col_num].data)\n df = t.to_pandas()\n d.close()\n print(\"Dataframe of table \" + str(col_num) + \" initialized.\")\n print(df.head())\n return df", "def get_airfoil_data(self, airfoil):\n return pd.DataFrame(self.af_data[airfoil]).astype(float)", "def convert_to_dataframe(obj):\n from pm4py.objects.conversion.log import converter\n df = converter.apply(obj, variant=converter.Variants.TO_DATA_FRAME)\n return df", "def _to_dataframe(self, raw):\n\n # if data is already a DataFrame, do nothing.\n if isinstance(raw, pd.DataFrame):\n return raw\n\n output = pd.read_csv(raw)\n\n return output", "def r2pd_dataframe(df):\r\n with localconverter(ro.default_converter + pandas2ri.converter):\r\n pd_from_r_df = ro.conversion.rpy2py(df)\r\n return pd_from_r_df", "def _bcf_to_df(self):\n dict_list = [v.resume for v in self.variants]\n df = pd.DataFrame.from_records(dict_list)\n try:\n df = df[Filtered_freebayes._col_index]\n except (ValueError, KeyError):\n df = df[Filtered_freebayes._col_index[: len(df.columns)]]\n return df", "def query_to_df(query):\r\n conn = sql_server_connection()\r\n dataFrame = pd.io.sql.read_sql(query, conn)\r\n conn.commit()\r\n conn.close()\r\n return dataFrame", "def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df", "def _abc_to_dataframe(abc_object):\n\n result = abc_object\n\n keys = list(result[0])\n\n results_dict = {key: [] for key in keys}\n\n for result in abc_object:\n for key in keys:\n results_dict[key].append(result[key])\n\n return pd.DataFrame(results_dict).apply(html.unescape)", "def from_arff(cls, arff_path, encode_nonnumeric=False):\n try:\n from 
scipy.io.arff import loadarff\n arff_data, arff_meta = loadarff(arff_path)\n except:\n raise ValueError('Error loading the ARFF dataset!')\n\n attr_names = arff_meta.names()[:-1] # last column is class\n attr_types = arff_meta.types()[:-1]\n if not encode_nonnumeric:\n # ensure all the attributes are numeric\n uniq_types = set(attr_types)\n if 'numeric' not in uniq_types:\n raise ValueError(\n 'Currently only numeric attributes in ARFF are supported!')\n\n non_numeric = uniq_types.difference({'numeric'})\n if len(non_numeric) > 0:\n raise ValueError('Non-numeric features provided ({}), '\n 'without requesting encoding to numeric. '\n 'Try setting encode_nonnumeric=True '\n 'or encode features to numeric!'.format(\n non_numeric))\n else:\n raise NotImplementedError(\n 'encoding non-numeric features to numeric is not implemented '\n 'yet! Encode features before exporting to ARFF.')\n\n dataset = cls()\n dataset._description = arff_meta.name\n\n # initializing the key containers, before calling self.add_samplet\n dataset._data = OrderedDict()\n dataset._targets = OrderedDict()\n dataset._targets = OrderedDict()\n\n num_samples = len(arff_data)\n num_digits = len(str(num_samples))\n make_id = lambda index: 'row{index:0{nd}d}'.format(index=index,\n nd=num_digits)\n sample_classes = [cls.decode('utf-8') for cls in arff_data['class']]\n class_set = set(sample_classes)\n label_dict = dict()\n # encoding class names to targets 1 to n\n for ix, cls in enumerate(class_set):\n label_dict[cls] = ix + 1\n\n for index in range(num_samples):\n samplet = arff_data.take([index])[0].tolist()\n sample_attrs = samplet[:-1]\n sample_class = samplet[-1].decode('utf-8')\n dataset.add_samplet(samplet_id=make_id(index),\n # ARFF rows do not have an ID\n features=sample_attrs,\n target=sample_class)\n # not necessary to set feature_names=attr_names for each samplet,\n # as we do it globally after loop\n\n dataset._feature_names = attr_names\n\n return dataset", "def npToDF(data):\n cols_end = ['TIME', 'DAY', 'MONTH', 'AMB_TEMP', 'MOD_TEMP',\n 'IRRADIATION', 'AC_POWER', 'DC_POWER']\n df = pd.DataFrame(data, columns=cols_end)\n df.reset_index(inplace=True)\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts pandas dataframe to arff data.
def df2arff(df):
    from loaders_savers import load_csv  # Imported here because of circular dependencies
    path = 'tmp_tmp432tmp123_tm_p_blabla3da.csv'  # Stupid name to "ensure" we do not override something
    df.to_csv(path, index=False)
    try:
        data = load_csv(path)
    finally:
        remove(path)
    return data
[ "def pandas2arff(df,filename,wekaname = \"pandasdata\",cleanstringdata=False,cleannan=True):\n\n import re\n\n def cleanstring(s):\n if s!=\"?\":\n return re.sub('[^A-Za-z0-9]+', \"_\", str(s))\n else:\n return \"?\"\n\n dfcopy = df #all cleaning operations get done on this copy\n\n\n if cleannan:\n dfcopy = dfcopy.fillna(-999999999) #this is so that we can swap this out for \"?\"\n #this makes sure that certain numerical columns with missing values don't get stuck with \"object\" type\n\n f = open(filename,\"w\")\n arffList = []\n arffList.append(\"@RELATION \" + wekaname + \"\\n\")\n #look at each column's dtype. If it's an \"object\", make it \"nominal\" under Weka for now (can be changed in source for dates.. etc)\n for i in range(df.shape[1]):\n if dfcopy.dtypes[i]=='O' or (df.columns[i] in [\"Class\",\"CLASS\",\"class\"]):\n if cleannan:\n dfcopy.iloc[:,i] = dfcopy.iloc[:,i].replace(to_replace=-999999999, value=\"?\")\n if cleanstringdata:\n dfcopy.iloc[:,i] = dfcopy.iloc[:,i].apply(cleanstring)\n _uniqueNominalVals = [str(_i) for _i in np.unique(dfcopy.iloc[:,i])]\n _uniqueNominalVals = \",\".join(_uniqueNominalVals)\n _uniqueNominalVals = _uniqueNominalVals.replace(\"[\",\"\")\n _uniqueNominalVals = _uniqueNominalVals.replace(\"]\",\"\")\n _uniqueValuesString = \" {\" + _uniqueNominalVals +\"}\"\n arffList.append(\"@ATTRIBUTE \" + df.columns[i] + _uniqueValuesString + \"\\n\")\n else:\n arffList.append(\"@ATTRIBUTE \" + df.columns[i] + \" real\\n\")\n #even if it is an integer, let's just deal with it as a real number for now\n arffList.append(\"@DATA\\n\")\n for i in range(dfcopy.shape[0]):#instances\n _instanceString = \"\"\n for j in range(df.shape[1]):#features\n if dfcopy.dtypes[j]=='O':\n _instanceString+=\"\\\"\" + str(dfcopy.iloc[i,j]) + \"\\\"\"\n else:\n _instanceString+=str(dfcopy.iloc[i,j])\n if j!=dfcopy.shape[1]-1:#if it's not the last feature, add a comma\n _instanceString+=\",\"\n _instanceString+=\"\\n\"\n if cleannan:\n _instanceString = _instanceString.replace(\"-999999999.0\",\"?\") #for numeric missing values\n _instanceString = _instanceString.replace(\"\\\"?\\\"\",\"?\") #for categorical missing values\n arffList.append(_instanceString)\n f.writelines(arffList)\n f.close()\n del dfcopy\n return True", "def arff2df(data):\n arff = liacarff.loads(str(data))\n attr = [a[0] for a in arff['attributes']]\n return pd.DataFrame(data=arff['data'], columns=attr)", "def convert_to_pandas_df(self):\n\n self.fsample = pd.DataFrame(self.fsample)\n self.fevent = pd.DataFrame(self.fevent)\n self.rec = pd.DataFrame(self.rec)", "def get_airfoil_data(self, airfoil):\n return pd.DataFrame(self.af_data[airfoil]).astype(float)", "def from_dataframe(df: pd.DataFrame):\n obj = Dataset()\n obj.labels = df.iloc[:, 0].to_numpy(dtype=int)\n obj.data = df.iloc[:, 1:].to_numpy(dtype=float)\n return obj", "def get_eta(df: pd.DataFrame) -> np.ndarray:\n return df[ETA_DOFS].to_numpy()", "def to_ndarray(df: DataFrameType) -> NDArray:\n if isinstance(df, (cudf.DataFrame, pd.DataFrame)):\n return df.values\n elif isinstance(df, (dask_cudf.DataFrame, dd.DataFrame)):\n return df.compute().values\n else:\n raise NotImplementedError(f'Conversion of type {type(df)} is not supported')", "def pd2np(pandas_dataframe):\n\n\t# replace NAs with -9999\n\tpandas_dataframe = pandas_dataframe.fillna(-9999)\n\n\tx = np.array(np.rec.fromrecords(pandas_dataframe.values))\n\tnames = pandas_dataframe.dtypes.index.tolist()\n\tx.dtype.names = tuple(names)\n\n\t# change field types\n\tfield_dtypes = 
dict_field_types(pandas_dataframe)\n\n\tif six.PY2:\n\t\tnew_types = field_dtypes.items()\n\telif six.PY3:\n\t\tnew_types = list(field_dtypes.items()) # need to cast to a list on Python 3\n\n\t# casts fields to new dtype (wq variables to float, date_time field to esri supported format\n\tx = x.astype(new_types) # arcpy np to fc only supports specific datatypes (date '<M8[us]'\n\n\treturn x", "def transform_for_prediction(self, df):\n\n # check Python version and use appropriate method to return iterable list\n if sys.version_info[0] < 3:\n items = df.iteritems()\n else:\n items = df.items()\n\n for col_name, col in items:\n if col.dtype.name == \"object\" or col.dtype.name == \"float64\":\n try:\n le = self.label_encoder.get(col_name, None)\n if le:\n if isinstance(le, MultiIdBinarizer):\n df = le.transform(df)\n elif le:\n df[col_name] = le.transform(df[col_name])\n else:\n self.log.error(\"Unable to find label encoder for \" + col_name)\n except ValueError as e:\n #\n #\n self.log.error(\"Need to handle new label for \" + col_name)\n\n return df", "def test_pandas_to_arrow():\n df = pd.DataFrame({\"a\":[1,2,3],\"b\":[4,5,6]})\n arr = pandas_to_arrow(df)\n assert(isinstance(arr,bytes))", "def toPyFMData(df):\n data = []\n users = set(df.User.unique())\n movies = set(df.Movie.unique())\n ratings = df.Rating.astype(float).tolist()\n for row in df.iterrows():\n data.append({\"user_id\": str(row[1].User), \"movie_id\": str(row[1].Movie)})\n return (data, np.array(ratings), users, movies)", "def dataframe_to_ndarray():\n df = pd.DataFrame(operations.get_mixed_matrix())\n print(type(df)) # <class 'pandas.core.frame.DataFrame'>\n print(df)\n ary = df.to_numpy()\n print(type(ary)) # <class 'numpy.ndarray'>\n print(ary)\n print(ary.shape) # (10, 10)", "def table_to_dataframe(fc, workspace = None, index = None):\n\n if workspace:\n arcpy.env.workspace = workspace\n \n fields = [field.name for field in arcpy.ListFields(fc)]\n types = {field.name: field.type for field in arcpy.ListFields(fc)}\n #print(types)\n \n if index:\n index = [row[0] for row in arcpy.da.SearchCursor(fc,index)]\n \n data = [list(row) for row in arcpy.da.SearchCursor(fc,fields + ['SHAPE@JSON'])]\n df = pd.DataFrame(data, index = index, columns = fields + ['SHAPE@JSON'])\n \n for field in fields:\n df[field].astype(map_tbl_to_df[types[field]])\n #print(df['SHAPE@JSON'])\n return df", "def dataframe_to_matrix(df):\n #1) add column of ones to dataframe\n df.insert(0, '1', 1)\n \n # Extracting all features (x1,x2,...,xD)\n X_train = df.iloc[:,:-1]\n \n # Convert DataFrame to array\n X_matrix_train = X_train.values\n \n # Extract 'y' column\n Y_train = df.iloc[:,-1:]\n \n # Convert DataFrame to array\n Y_matrix_train = Y_train.values\n \n return(X_matrix_train, Y_matrix_train)", "def convert_to_dataframe(obj):\n from pm4py.objects.conversion.log import converter\n df = converter.apply(obj, variant=converter.Variants.TO_DATA_FRAME)\n return df", "def test_ARIMA(self):\n # data_dir = os.path.join(os.getcwd(), \"VIIRS_Sample\")\n calc_TS_Trends(os.path.join(data_dir, \"Test_San_Juan_FullStats2.csv\"), os.path.join(data_dir, \"Test_San_Juan_ARIMA_Output2.csv\"), 3, \"2017/08/15\", 2)\n #output = os.path.join(data_dir, 'Test_San_Juan_FullStats2.csv')\n #base_instance.to_csv(output)\n base_instance = pd.read_csv(os.path.join(data_dir, \"Test_San_Juan_ARIMA_Output2.csv\"))\n gdf = pd.read_csv(os.path.join(data_dir, \"Test_San_Juan_ARIMA_Output.csv\"))\n gdf = gdf.sort_values(by=['date'])\n base_instance = 
base_instance.sort_values(by=['date'])\n base_instance = base_instance[['date', 'SeasonalForecast']]\n gdf = gdf[['date', 'SeasonalForecast']]\n print(gdf['SeasonalForecast'])\n print(base_instance['SeasonalForecast'])\n # base_instance.set_index('date')\n # gdf.set_index('date')\n # gdf = gdf.loc[:, ~gdf.columns.str.match('Unnamed')]\n # base_instance = base_instance.loc[:, ~base_instance.columns.str.match('Unnamed')]\n # assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds\n # assert base_instance.equals(gpd.GeoDataFrame([]))\n pd.testing.assert_frame_equal(base_instance.reset_index(drop=True), gdf.reset_index(drop=True))\n # assert gdf['SeasonalForecast'].equals(base_instance['SeasonalForecast'])", "def process_native_data(df: pd.DataFrame):\n processed_df = process_all_labels(df.copy())\n return processed_df", "def pretty(self, df):\n pretty_df = pd.DataFrame(columns=df.columns)\n logger.info(\"Converting values in anonymized dataframe to their pretty versions\")\n for col in df.columns:\n for index, value in tqdm(df[col].iteritems(), total=len(df), desc=col):\n pretty_df.at[index, col] = convert_to_pretty(value, self.__config.get_default_date_format())\n return pretty_df", "def _transform(self, dataset: DataFrame) -> DataFrame:\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new section of a class, useful because there may be multiple sections of, say, CSCI1300
def add_section(class_id, course_title, section_number):
    new_section = Section.objects.get_or_create(class_id=class_id)[0]
    new_section.course_title = course_title
    new_section.section_number = section_number
    new_section.save()
    return new_section
[ "def new_section(self, doc, *args, **kwargs):\n\n section = Section(doc, *args, **kwargs)\n if section.identifier:\n if section.identifier in self.sections:\n print(f'section identifier {section.identifier!r} already used')\n else:\n self.sections[section.identifier] = section\n doc.sections.append(section)\n return section", "def add_new_section(self, name, context=None):\n # Add a new section\n section = self.__class__(name=name, target=self.target,\n context=context)\n section.path = self.path + [name]\n # Indent the section apporpriately as well\n section.style.indentation = self.style.indentation\n section.translation_map = self.translation_map\n section.hrefs = self.hrefs\n self._structure[name] = section\n return section", "def cmd_add_section(self, name):\r\n self._tree.add_section(name)\r\n self.draw_panel()", "def add_section_to_form(newsection, form):\n\tform['iformSectionTiesArray'].append(newsection)", "def _add_section(self, name):\n\n self.sections[name] = Section()", "def add_section(self) -> 'CodeBuilder':\n section = CodeBuilder(indent=self._indent)\n self._code.append(section)\n return section", "def add_section(self, section):\n if not self.has_section(section):\n self[section] = dict()", "def create_section(self, level: int, section: str) -> None:\n self.add_output(section)\n self.add_output(self.sections[level] * len(section.rstrip()), line_breaks=2)", "def add_elem_to_section(newelem, section):\n\tsection['iform_section']['iformFieldsArray'].append(newelem)", "def add_class(self, class_):\n self.classes.append(class_)", "def add_class_traits(class_id, class_options_id, subclass_id=-1):\n class_options_id += 1\n choiceOptionsId = class_options_id\n add_options_connection(\"Class\", class_options_id, class_id, subclass_id)\n addElement = True\n while addElement:\n nextTrait = input(\"Enter the name of the trait to add, CHOICE for a choice, or enter for none: \")\n if nextTrait == \"CHOICE\":\n choiceOptionsId += 1\n amnt = Db.int_input(\"How many options are there: \")\n picks = Db.int_input(\"How many do they pick: \")\n add_options_connection(\"Class\", choiceOptionsId, class_id, subclass_id, picks)\n for x in range(0, amnt):\n nextTraitChoice = input(\"Enter the name of the next trait option: \")\n nextTraitChoice = Db.get_id(nextTraitChoice, \"Trait\")\n Db.insert(\"ClassTrait(classOptionsId, traitId)\", (choiceOptionsId, nextTraitChoice))\n\n elif nextTrait != \"\":\n nextTrait = Db.get_id(nextTrait, \"Trait\")\n Db.insert(\"ClassTrait(classOptionsId, traitId)\", (class_options_id, nextTrait))\n addElement = add_another_item()\n else:\n addElement = False\n return choiceOptionsId", "def add_xl_section(self):\n idx = self.view.selectedIndexes()[0]\n selected_item = idx.model().itemFromIndex(idx)\n dict_to_add = {\"xl\":{\"sheet\": \"\", \"name\": \"\"}}\n # add the subsubsection\n self.add_subsubsection(selected_item, dict_to_add)\n # update the tab text with an asterix if required\n self.update_tab_text()", "async def _courses_create_section(self, ctx, section_number: str, *, topic: str = \"\"):\n if not section_number:\n return\n\n parent_course = self.bot.get_guild(self.guild_id).get_channel(ctx.channel.category_id)\n channel = await self.bot.get_guild(self.guild_id).create_text_channel(name=f\"section-{section_number}\", category=parent_course)\n\n if topic:\n await channel.edit(topic=topic)", "def __init__(self, title, content, numbering=True):\n super(Section, self).__init__(content, title)\n self.numbering = numbering", "def 
addSection(self,title,description, reference=None):\r\n \r\n # We create an id that will later be used by the link in the index\r\n titleid = unicode(title).replace(\" \",\"\").replace(\"(\",\"\").replace(\")\",\"\").lower()\r\n body = self.html.find('body')\r\n indicelist = body.find('div/ol')\r\n item = ET.SubElement(indicelist,'li')\r\n ET.SubElement(item,'a',{\"href\":'#'+titleid}).text = unicode(title)\r\n seccion = ET.SubElement(body,'div',{\"class\":\"section\"})\r\n ET.SubElement(seccion, 'h2', {\"id\":titleid}).text = unicode(title) + \": \"\r\n sectdesc = ET.SubElement(seccion, 'blockquote')\r\n bold = ET.SubElement(sectdesc, 'b')\r\n bold.text = u\"Descripción: \"\r\n bold.tail = description\r\n if reference is not None:\r\n ET.SubElement(sectdesc, 'br')\r\n ET.SubElement(sectdesc, 'br')\r\n ET.SubElement(sectdesc, 'b').text = u\"Referencias: \"\r\n ET.SubElement(sectdesc, 'a', href=reference).text = reference\r\n # And return the empty section for it to be filled \r\n return seccion", "def document_client(self, section):\n self._add_title(section)\n self._add_class_signature(section)\n client_methods = get_instance_public_methods(self._client)\n self._add_client_intro(section, client_methods)\n self._add_client_methods(section, client_methods)", "def append(self, section: RoadSection):\n section.id = len(self.sections)\n if section.id == 0:\n section._is_start = True\n if section.id > 0:\n # Pass ending of last section as the transformation to next section\n ending: Tuple[Pose, float] = self.sections[-1].get_ending()\n section.set_transform(Transform(ending[0], ending[0].orientation))\n section.prev_length = self.length\n self.length = self.length + section.middle_line.length\n self.sections.append(section)", "def add_subsection(cls, sched_section):\n\n if not isinstance(sched_section, yc.ConfigElement):\n raise RuntimeError(\"Tried to add a subsection to the config, but it \"\n \"wasn't a yaml_config ConfigElement instance (or \"\n \"an instance of a ConfigElement child class).\\n\"\n \"Got: {}\".format(sched_section))\n\n name = sched_section.name\n\n names = [el.name for el in cls.ELEMENTS]\n\n if name in names:\n raise RuntimeError(\"Tried to add a subsection to the config called \"\n \"{0}, but one already exists.\".format(name))\n\n try:\n cls.check_leaves(sched_section)\n except ValueError as err:\n raise ValueError(\"Tried to add result parser named '{}', but \"\n \"leaf element '{}' was not string based.\"\n .format(name, err.args[0]))\n\n cls.ELEMENTS.append(sched_section)", "def create_section():\n dummies = [DummyOperator(task_id=f'task-{i + 1}') for i in range(5)]\n\n with TaskGroup(\"inside_section_1\") as inside_section_1:\n _ = [DummyOperator(task_id=f'task-{i + 1}',) for i in range(3)]\n\n with TaskGroup(\"inside_section_2\") as inside_section_2:\n _ = [DummyOperator(task_id=f'task-{i + 1}',) for i in range(3)]\n\n dummies[-1] >> inside_section_1\n dummies[-2] >> inside_section_2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable creation of a new user; return the newly added student.
def add_student(student_id, first_name, last_name, password, email_address, course_list, view_url, pic_url):
    new_User = User.objects.get_or_create(email=email_address)[0]
    new_User.first_name = first_name
    new_User.last_name = last_name
    new_User.password = password
    new_User.username = email_address  # the original assigned an undefined `username`; assuming the email doubles as the username
    new_User.save()
    new_student = Student.objects.get_or_create(user=new_User)[0]  # get_or_create returns a tuple, where element 0 is the object
    new_student.course_list = course_list
    new_student.save()
    return new_student
[ "def add_student(conf, backend, args):\n try:\n add_to_roster(\n conf, backend, conf.roster, args.name, args.username, args.section, args.force\n )\n except DuplicateUserError:\n logger.error(\"Student already exists in roster!\")", "def create(self, *args, **kwargs):\n return super().create_user(*args, **kwargs)", "def create_student(name, StudentList):\r\n new_student = Student(name)\r\n StudentList.append(new_student)\r\n return new_student", "def create(self, validated_data):\n student = Student.objects.create(**validated_data)\n return student", "def add_student(self):\n print(\"You have chosen to add a student\")\n #Collect input\n sid = input(\"Please input the student's ID\\n\").strip()\n fname = input(\"Please input the student's first name\\n\").strip()\n lname = input(\"Please input student's last name\\n\").strip()\n gpa_str = input(\"Please input student's GPA\\n\").strip()\n major = input(\"Please input student's Major\\n\").strip()\n fa = input(\"Please input student's Faculty Advisor\\n\").strip()\n #Check for invalid input\n tocheck = [fname, lname, major, fa]\n gpa = self.to_gpa(gpa_str)\n sid = self.to_id(sid)\n if self.are_all_names(tocheck) and not (gpa == None or sid == None):\n #send query to database interface object\n self.db.create_student(sid, fname,lname,gpa,major,fa)", "def _add_user(user, state):\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\n entry = CourseCreator(user=user, state=state)\n entry.save()\n return True\n\n return False", "def create_default_student(user):\n contacts = [\n {'notes': '', 'type': 'd', 'first_name': user.first_name, 'last_name': user.last_name, 'methods': [\n {'type': 'e', 'value': user.email},\n {'type': 't', 'value': user.phone},\n {'type': 'p', 'value': ''}]}]\n\n default_student = Student(key=ndb.Key(Student, DEFAULT_STUDENT_ID + \"-%s\" % (user.key.id()),\n namespace=\"_x_\"),\n first_name = user.first_name,\n last_name = user.last_name,\n contacts = contacts,\n default_student=True,\n is_direct = True,\n groups = [],\n school = None)\n\n default_student.put()", "def create_new_user(self):\n name = get_param('What is your name?', self.screen)\n address = get_param('What is your street address?', self.screen)\n city = get_param('What city do you live in?', self.screen)\n state = get_param('What state do you live in?', self.screen)\n zipcode = get_param('What is your zipcode?', self.screen)\n phone = get_param('What is your phone number?', self.screen)\n\n try:\n self.current_user = generate_new_customer(name, address, city, state, zipcode, phone)\n self.user_name = name\n self.logged_in_menu()\n except:\n self.unlogged_in_menu()", "def insertNewStudent(curs, username, passwd1, name, email, phone, cid, grade):\n hashed = bcrypt.hashpw(passwd1.encode('utf-8'), bcrypt.gensalt())\n curs.execute('INSERT into userpass(username,hashed) VALUES(%s,%s)', [username, hashed])\n curs.execute('INSERT into students (name, email, phone, grade, cid, joinDate, username, verified) '+\n 'VALUES (%s, %s, %s, %s, %s, CURRENT_DATE, %s, 0)', #0 boolean for is unverified student\n [name, email, phone, grade, cid, username])", "def create_user_to_test_with(self):\n user_object = User.objects.create_user(username='roy1',\n first_name='Roy',\n last_name='Hanley',\n email='rhanley8@gmail.com',\n password='small fat gibbon')\n user_object.save()\n user_extended_object = UserExtended(user=user_object)\n user_extended_object.save()\n return", "def create_user():\n user_record = request.get_json(force=True)\n\n 
add_user_to_db(user_record)\n\n return \"Successfully added user.\", 200", "def create_user():\r\n new_user = input(\"| Enter the name of the User |\")\r\n password = input(\"| Enter the Password of the User |\")\r\n aduser.ADUser.create(new_user, password=password, enable=True)\r\n return \"| User Created |\"", "def _create_user(self, request):\n # Should be implemented by subclass depending on data source for user\n raise SystemError(\"This method should not be called\")", "def create(student_id, email, first_name, last_name, **kwargs):\n new_member = Member(student_id, email, first_name, last_name, **kwargs)\n db.session.add(new_member)\n db.session.commit()\n\n return new_member", "def save(self):\n lesson = self.validated_data[\"lesson_id\"]\n if lesson.locked:\n return True\n new_student = self.validated_data[\"student_id\"]\n lesson.students.add(new_student)\n lesson.save()", "def __callAddStudent(self):\r\n idSubject=input(\" Give ID:\")\r\n name=input(\" Give name:\") \r\n try:\r\n st=self.__lista.createStudent(idSubject, name)\r\n self.__lista.addStudent(st)\r\n print(\"Student \"+st.getName()+\" has been successfully added.\")\r\n except InputError as ex:\r\n print(ex.getErrors())\r\n except IdError as ex:\r\n print(ex.getErrors())\r\n except DuplicateDataError as ex:\r\n print(ex.getErrors())\r\n except RepositoryError() as ex:\r\n print(ex.getErrors())", "def sample_user(name=\"Test User\", email=\"test@gmail.com\"):\n return get_user_model().objects.create_user(name, email)", "def create_student(**student):\n\n _student = dict.fromkeys(['first_name', 'last_name', 'middle_initial', \n 'address','email', 'phone_number'])\n\n _student['first_name'] = student.get('first_name', 'N/A')\n _student['last_name'] = student.get('last_name', 'N/A')\n _student['middle_initial'] = student.get('middle_initial', 'N/A')\n \n # Prompt user for student's contact information...\n _student['address'] = student.get('address', 'N/A')\n _student['email'] = student.get('email', 'N/A')\n _student['phone_number'] = student.get('phone_number', 'N/A')\n\n return _student", "def perform_create(self, serializer):\n serializer.save(user_data=self.get_user_data())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process the items to display so they are wrapped according to the current terminal size.
def item_strings_formatted(self): #if self._item_strings_formatted and self.width == terminal.width: # return self._item_strings_formatted # Reset current wrapped item info self._item_strings_formatted = [] self.item_onscreenlocs = [] # Take each item to display by line, and break it into multiple lines based of current terminal width line_no = 0 for item_no, item_display in enumerate(self.item_strings): # Confirm indentation level for each item try: item_indentation = self.item_indentations[item_no] * 2 except IndexError: item_indentation = 0 finally: indentation = self.indent + item_indentation # Save location of each new broken down line self.item_onscreenlocs.append(line_no) for item_display_line in item_display.splitlines(): item_width = self.width - indentation - 1 # Width of item is width of page, minus item indentation, and minus an extra character for the trailing '│' symbol for line in terminal.wrap(item_display_line, item_width): if indentation > 1: line = terminal.bold_white_on_black(' ' * indentation + '│' + line) else: line = terminal.bold_white_on_black(' ' * indentation + line) self._item_strings_formatted.append(line) line_no += 1 # Add extra blank line under item line = terminal.bold_white_on_black(' ' * self.width) self._item_strings_formatted.append(line) line_no += 1 return self._item_strings_formatted
[ "def update_terminal_width(*ignored):\n w, h = shutil.get_terminal_size()\n config = IPython.get_ipython().config\n config.PlainTextFormatter.max_width = w - 1\n shell = IPython.core.interactiveshell.InteractiveShell.instance()\n shell.init_display_formatter()\n\n if 'numpy' in sys.modules:\n import numpy as np\n np.set_printoptions(linewidth=w - 5)", "def _ch_resize(self):\n self.main(self.stdscr)", "def adjust_widths(self):\n#PKHG>TODO 4jul12???!!check the rest of this def \n w = max(self.my_width ,self.max_width()) \n for item in self.children:\n item.set_width(w)\n if isinstance(item, MenuItem):\n item.create_backgrounds()\n else:\n item.draw()", "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n self.window.addstr(idx, 0, item, curses.color_pair(1))\n self.window.refresh()", "def size(self, size):\n n_lines, n_cols = size\n getmaxyx = YX(*self.tui.stdscr.getmaxyx())\n if n_lines is None:\n n_lines = getmaxyx.y - self.start.y\n if n_cols is None:\n n_cols = getmaxyx.x - self.start.x\n self.win.resize(n_lines, n_cols)", "def _define_size(self):\n rect = wx.Rect(0, 0, 0, 0)\n self.rect.height = self.PADDING\n for line in self.lines:\n # iterate through each line and set the TextBox around the line with\n # the largest width and height\n if line.rect.width > rect.width:\n rect.width = line.rect.width + self.PADDING\n if line.rect.height > rect.height:\n rect.height = line.rect.height\n # grow our TextBox to fit the next line of Text include Padding\n self.rect.height += line.rect.height\n self.rect.height += self.PADDING\n self.rect.width = rect.width", "def display(self):\n for box in self.boxes:\n box.display()\n for line in self.lines:\n line.display(self)", "def _wrap_chunks_to_fit_rect(self,\n lines_of_chunks: List[Dict[str,\n Union[List[Dict[str, Any]], int]]]):\n if self.width == -1:\n return\n for line_index, line_data in enumerate(lines_of_chunks):\n line_render_length = 0\n split_point = -1\n chunk_index = 0\n chunk_to_split_index = 0\n chunk_length = 0\n for chunk in line_data['chunks']:\n metrics = chunk['font'].metrics(chunk['text'])\n chunk_length = chunk['font'].size(chunk['text'])[0]\n line_render_length += chunk_length\n if line_render_length > self.width:\n char_line_length = line_render_length - chunk_length\n for i, metric in enumerate(metrics):\n advance = metric[4]\n char_line_length += advance\n if char_line_length > self.width:\n # splitting time\n chunk_to_split_index = chunk_index\n split_point = i\n break\n if split_point != -1:\n break\n chunk_index += 1\n\n if split_point != -1:\n self._split_chunk(chunk_length, chunk_to_split_index,\n line_data['chunks'], line_index,\n lines_of_chunks, split_point)", "def printsizes(wrappers, sorted = descending, threshold = 0, count = None,\n size = lambda w: w.size, show = lambda w: w.show()):\n\n vals = wrappers.values()\n if sorted != unsorted:\n vals.sort(lambda w1, w2: sorted * (size(w1) - size(w2)))\n\n print \"Size\".rjust(8), \"Total\".rjust(8), \"Object\"\n total = 0\n for w in vals[:count]:\n if size(w) >= threshold:\n total += size(w)\n print str(size(w)).rjust(8), str(total).rjust(8), show(w)", "def calcWidths(self):\n self.popup_width, dum = self.GetClientSizeTuple()\n count = len(self.choices)\n self.widths = [None] * count\n self.heights = [None] * count\n self.largest_width = None\n #print(\"calcWidths: %d 
items\" % count)\n for i in range(count):\n self._calcWidth(i)", "def set_size_from_terminal(self):\n self.rows, self.columns = os.popen('stty size', 'r').read().split()\n self.width = int(self.columns)\n self.height = int(self.rows) - 2", "def _wrap(self, availWidth):\n\n self._lines = []\n minWidthRequired = 0\n\n if len(self._prewrapLines) == 0:\n return minWidthRequired\n\n spaceWidth = self._fontManager.textWidth(\" \", self._fontSize)\n\n tempLines = self._prewrapLines\n currentTempLine = 0\n #logger.debug(\"TableText::_wrap> availWidth: \" + str(availWidth) + \", tempLines: \" + str(tempLines))\n for currentTempLine, tempLine in enumerate(tempLines):\n tempLineWidth = self._fontManager.textWidth(tempLine, self._fontSize)\n #logger.debug(\"TableText::_wrap> tempLine: \" + tempLine + \", tempLineWidth: \" + str(tempLineWidth))\n\n if tempLineWidth <= availWidth:\n # easy case: the entire line fits within availWidth\n\n #logger.debug(\"TableText::_wrap> tempLineWidth <= availWidth\")\n self._lines.append(tempLine)\n minWidthRequired = tempLineWidth\n else:\n # the line needs to be wrapped in order to fit in availWidth\n # break the line into tokens, each token is a word or number or a punctuation character\n\n tempWords = re.split(\"(\\W)\", tempLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n while len(tempWords) > 0 and totalLinesHeight < self._maxCellHeight:\n #logger.debug(\"TableText::_wrap> starting new line. Words left: \" + str(tempWords))\n currentLineWords = []\n remainingWidth = availWidth\n\n fillingCurrentLine = True\n # TODO: remove any leading spaces\n\n while fillingCurrentLine:\n tempWord = tempWords.pop(0)\n\n # reportlab doesn't handle \\t character. replace with space\n if tempWord == '\\t':\n tempWord = ' '\n\n #start = time.time()\n tempWordWidth = self._fontManager.textWidth(tempWord, self._fontSize)\n #finish = time.time()\n #stringWidthTimes.append(finish-start)\n\n\n #addSpace = False\n #logger.debug(\"TableText::_wrap> word: \" + tempWord + \", wordWidth: \" + str(tempWordWidth) + \", remainingWidth: \" + str(remainingWidth))\n if len(currentLineWords) > 0:\n tempWordWidth = tempWordWidth + spaceWidth\n #addSpace = True\n\n if tempWordWidth <= remainingWidth:\n # temp word can fit in the remaining space\n #logger.debug(\"TableText::_wrap> can fit within remaining space\")\n\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(tempWord)\n remainingWidth = remainingWidth - tempWordWidth\n elif tempWordWidth <= availWidth:\n # temp word cannot fit in the remaining space, but can fit on a new line\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, but can fit on next line\")\n\n tempWords.insert(0, tempWord)\n remainingWidth = 0\n fillingCurrentLine = False\n else:\n # temp word cannot fit in the remaining space, nor can it fit on a new line\n # hard-break a segment off the word that will fit in the remaining space\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, and cannot fit on next line\")\n\n #if addSpace:\n #\tremainingWidth = remainingWidth - spaceWidth\n firstSegment, restOfWord = self._wrapWord(tempWord, remainingWidth, wordWidth = tempWordWidth)\n #logger.debug(\"TableText::_wrap> broke word \" + tempWord + \" into: \" + firstSegment + \" and \" + restOfWord)\n tempWords.insert(0, restOfWord)\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(firstSegment)\n fillingCurrentLine = False\n\n if len(tempWords) == 0:\n # we're done filling the 
current line, given that there are no more words\n fillingCurrentLine = False\n\n currentLine = \"\".join(currentLineWords)\n self._lines.append(currentLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n minWidthRequired = max(minWidthRequired, availWidth - remainingWidth)\n\n # check to see if we need to truncate the cell's contents\n if (len(self._lines) * self._lineHeight) >= self._maxCellHeight:\n break\n\n if (currentTempLine + 1) < len(tempLines):\n # we truncated\n percentageShown = (100.0 * float(currentTempLine) / float(len(tempLines)))\n logger.info(\"TableText::_wrap> truncated cell contents. %s%% shown.\" % percentageShown)\n # TODO: this needs to be internationalized\n self._lines.append(\"... Truncated. %s%% shown.\" % percentageShown)\n\n logger.debug(\"TableText::_wrap> minWidthRequired: \" + str(minWidthRequired) + \", self._lines: \" + str(self._lines))\n return minWidthRequired", "def loading_bar(loading_section_size, count, total_number):\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == total_number:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()", "def set_text_size(self, *args) -> None:\n for widget in self.children:\n set_text_to_fit(widget)", "def render(self, size, focus=False):\n labels_height = self.label_columns.get_height() + 1\n self.contents[0] = (self.contents[0][0], (GIVEN, labels_height))\n return super(LabelsPile, self).render(size)", "def on_resize(self, size: tuple[int, int]) -> None:\n\n width, height = size\n\n for window in self._windows:\n newx = max(0, min(window.pos[0], width - window.width))\n newy = max(0, min(window.pos[1], height - window.height))\n\n window.pos = (newx, newy)\n\n self.print()", "def _set_size_list(self):\n # list of size choices\n for idx in range(len(SIZE)):\n self.font_size.Append(str(SIZE[idx]), idx)", "def wrap(text, width=80):\n lines = []\n for paragraph in text.split('\\n'):\n line = []\n len_line = 0\n for word in paragraph.split(' '):\n word.strip()\n len_word = len(word)\n if len_line + len_word <= width:\n line.append(word)\n len_line += len_word + 1\n else:\n lines.append(' '.join(line))\n line = [21*' '+word]\n len_line = len_word + 22\n lines.append(' '.join(line))\n return lines", "def wordWrap (text, lineWidth, gc):\r\n words = text.split()\r\n lines = []\r\n currentWidth = 0\r\n currentLine = ''\r\n \r\n for word in words:\r\n wordWidth = gc.GetTextExtent(word + ' ')[0]\r\n if currentWidth + wordWidth < lineWidth:\r\n currentLine += word + ' '\r\n currentWidth += wordWidth\r\n else:\r\n lines.append(currentLine)\r\n currentLine = word + ' '\r\n currentWidth = wordWidth\r\n \r\n lines.append(currentLine)\r\n return lines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Safely update selected item index.
def item_selected(self, potential_item_selected): if 0 <= potential_item_selected < len(self.items): self._item_selected = potential_item_selected
[ "def select(self, item):\n if not item.selected:\n item.selected=True\n self._total_selected+=1\n debug('*** total_selected={}'.format(self._total_selected))", "def update_selected_slot(self):\n if is_deleted(self): return\n new_selected = self.getSelectedFilterItems()\n for item in self.getFilterItems():\n if item in new_selected:\n item.mark_selected() \n else:\n item.mark_unselected()\n return", "def update_selected_client_idx(self):\n\n for i in range(0, self.client_list_widget.count()):\n item = self.client_list_widget.item(i)\n if item.isSelected():\n self.selected_client_idx = i\n return\n self.selected_client_idx = -1", "def current_changed(self, index):\r\n neditable = self.combo.itemData(index)\r\n self.emit(SIGNAL(\"changeCurrent(PyQt_PyObject, int)\"), neditable, index)", "def updateItems(self):\n selected = self.userInput.selected()\n if selected:\n for item in self.items[selected.value()]:\n self.itemSelect.addOption(item)", "def OnSelectedItemUpdated(self):\n pass", "def item_selection_changed(self):\n pass", "def btn_update_clicked(self):\n selected_index = self.gui.tbldata.selectionModel().selectedRows()\n print 'Test:', str(selected_index)\n self.gui.tbl_data.update_row(self.model_data, selected_index)", "def _setValueFromSelected( self ) :\n sel = self.dropdownlistbox.GetSelection()\n if sel > -1:\n itemtext = self._choices[sel]\n self.SetValue (itemtext)\n self.SetInsertionPointEnd ()\n self.SetSelection(-1, -1)\n self._showDropDown ( False )", "def on_index_changed(self, event):\n if not self._guard & INDEX_GUARD:\n self.declaration.index = self.widget.GetCurrentSelection()", "def unselect(self, item):\n if item.selected:\n item.selected=False\n self._total_selected-=1\n debug('*** total_selected={}'.format(self._total_selected))", "def setSelectedItemWithoutHighlight(self, item):\n if self.__selected != item:\n self.__selected = item\n self.triggerEvent('selectedItem', item=item, position=self.getPosition(), name=item.getName(), \n imageLabel=item.getImageLabel(), inventoryOnly=item.inventoryOnly(), \n options=item.getOptions(), adminActions=item.getAdminActions(), isTile=item.isTile(), \n highlight=1)", "def set_index(self, index: int) -> None:\n self.combo.setCurrentIndex(index)", "def open(self) -> None:\n self.selected_item = -1", "def _update_selection_items(self):\n transform = self._transform\n for item, selection in self._selection.items():\n path = transform.map(selection.unscaled_path)\n ppath = self._create_path(item, path)\n selection.set_path(ppath)", "def OnSelectedItemChanged(self):\n pass", "def set_current(self):\n\n if self.content.size() <= 0:\n return # Non item in listbox\n if self.index < 0:\n self.index = 0\n self.content.selection_clear(0, tk.END)\n self.content.selection_set(self.index)\n self.current.delete(\"1.0\", tk.END)\n self.current.insert(tk.END, self.content.get(self.index))\n self.current.focus()", "def set_item(self, index, new_item):\n row = index.row() if hasattr(index, \"row\") else index\n self.collection[row] = new_item\n self.dataChanged.emit(self.index(\n row, 0), self.index(row, self.rowCount() - 1))", "def test_tv_item_select(self):\n # Find by a path with indexes\n itm = self.ctrl.get_item((0, 2, 3))\n self.assertEqual(itm.is_selected(), False)\n\n # Select\n itm.select()\n self.assertEqual(itm.is_selected(), True)\n\n # A second call to Select doesn't remove selection\n itm.select()\n self.assertEqual(itm.is_selected(), True)\n\n itm = self.ctrl.get_item((0, 3, 2))\n itm.ensure_visible()\n 
self.assertEqual(itm.is_selected(), False)\n coords = itm.children(control_type='Text')[0].rectangle().mid_point()\n itm.click_input(coords=coords, absolute=True)\n self.assertEqual(itm.is_selected(), True)", "def setSelectedIndex(menu,index):\n\tassertMenu(menu)\n\tassert type(index) is int\n\tassert index >= 0 and index < len(menu[\"buttonList\"]),\"Index out of range. Tried is : %r and it have to be in [0,%r]\" % (index,len(menu[\"buttonList\"])-1)\n\tmenu[\"currentIndex\"] = index\n\tmenu[\"lastIndex\"] = index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a Dense convolution where radius_idx represents the indices of the points in x and pos to be aggregated into the new feature for each point in new_pos.
def conv(self, x, pos, new_pos, radius_idx, scale_idx): assert scale_idx < len(self.mlps) new_features = self._prepare_features(x, pos, new_pos, radius_idx, scale_idx) new_features = self.mlps[scale_idx](new_features) # (B, mlp[-1], npoint, nsample) new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)]) # (B, mlp[-1], npoint, 1) new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) return new_features
[ "def resnetb_strided_block(layer_ind, inputs, features, radius, fdim, config, training):\n\n with tf.variable_scope('conv1'):\n w = weight_variable([int(features.shape[1]), fdim // 2])\n x = conv_ops.unary_convolution(features, w)\n x = leaky_relu(batch_norm(x,\n config.use_batch_norm,\n config.batch_norm_momentum,\n training))\n\n with tf.variable_scope('conv2'):\n w = weight_variable([config.num_kernel_points, int(x.shape[1]), fdim // 2])\n x = KPConv(inputs['points'][layer_ind + 1],\n inputs['points'][layer_ind],\n inputs['pools'][layer_ind],\n x,\n w,\n radius,\n config)\n\n x = leaky_relu(batch_norm(x,\n config.use_batch_norm,\n config.batch_norm_momentum,\n training))\n\n with tf.variable_scope('conv3'):\n w = weight_variable([int(x.shape[1]), 2 * fdim])\n x = conv_ops.unary_convolution(x, w)\n x = batch_norm(x,\n config.use_batch_norm,\n config.batch_norm_momentum,\n training)\n\n with tf.variable_scope('shortcut'):\n # Pool shortcuts to strided points TODO: max_pool or closest_pool ?\n shortcut = ind_max_pool(features, inputs['pools'][layer_ind])\n # shortcut = closest_pool(features, neighbors_indices)\n\n # Regular upsample of the features if not the same dimension\n if int(shortcut.shape[1]) != 2 * fdim:\n w = weight_variable([int(shortcut.shape[1]), 2 * fdim])\n shortcut = conv_ops.unary_convolution(shortcut, w)\n shortcut = batch_norm(shortcut,\n config.use_batch_norm,\n config.batch_norm_momentum,\n training)\n\n return leaky_relu(x + shortcut)", "def _remove_dilations(self):\n\n input_shape = tf_shape(self.input)\n in_spatial_shape = input_shape[1:self.spatial_size + 1]\n\n channels_count = input_shape[self.spatial_size + 1]\n # Initialize gather_ind with the range of channels\n # e.g. [0 1]\n gather_ind = tf.range(channels_count, dtype=tf.int64)\n # convert the vector to column vector\n # in the following logic we use column vectors\n gather_ind = tf.expand_dims(gather_ind, 1)\n\n # initilize the output_shape with zeros\n # self.output_shape will contain the shape of the\n # output tensor after the loop below is executed\n self.output_shape = [0] * (self.spatial_size + 2)\n self.output_shape[0] = input_shape[0]\n \"\"\"\n Loop over the input spatial dimensions starting from the\n last (most internal) going up to the first dimension\n\n On every step of the loop calculate the output indices and\n map them to the input indices using `_calc_input_ind`,\n then \"combine\" with the already calculated indices from the\n previous dimensions using cartesian product.\n\n For the following example input:\n\n Input: [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [ 12, 13, 14, 15]]\n\n Kernel: [2, 2]\n Dilations: [2, 2]\n Strides: [1, 1]\n\n these are the steps that will be executed:\n\n 1. Initilize gather_ind = [[0]] # we have only 1 channel\n\n 2. Loop step 0 (axis 1):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0]\n [2 0]\n [1 0]\n [3 0]]\n\n 3. 
Loop step 1 (axis 0):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0 0]\n [0 2 0]\n [0 1 0]\n [0 3 0]\n [2 0 0]\n [2 2 0]\n [2 1 0]\n [2 3 0]\n [1 0 0]\n [1 2 0]\n [1 1 0]\n [1 3 0]\n [3 0 0]\n [3 2 0]\n [3 1 0]\n [3 3 0]]\n\n These are the indices used for gather_nd operation to collect\n the values from the input data.\n \"\"\"\n\n for dim in range(self.spatial_size - 1, -1, -1):\n filter_size = (self.kernel_shape[dim] - 1) * \\\n self.dilations[dim] + 1\n output_size = ((\n (in_spatial_shape[dim] - filter_size) // self.strides[dim]) + 1\n ) * self.kernel_shape[dim]\n self.output_shape[dim + 1] = output_size\n\n # initialize the output dimension index with the range of the\n # dimension output size (e.g. 4): [0, 1, 2, 3]\n dim_ind = tf.range(output_size)\n\n # calculate the matching indices in the input data\n # [0, 1, 2, 3] will calculate to [0, 2, 1, 3]\n # from the above example\n dim_ind = self._calc_input_ind(dim_ind, self.kernel_shape[dim],\n self.dilations[dim], self.strides[dim])\n # convert to column vector\n dim_ind = tf.expand_dims(dim_ind, 1)\n\n # \"combine\" current dimension indices with the previous dimensions\n # using cartesian product\n gather_ind = tf_product(dim_ind, gather_ind)\n\n # The result from the above loop for 2D data will be:\n # [[y1, x1, c], [y2, x2, c], ..., [yn, xm, c]] where n is the height,\n # m is the width and c is the channel number.\n\n # set the channels count in the output_shape\n self.output_shape[self.spatial_size + 1] = channels_count\n\n # expand the dimensions to match the input dimensions + 1\n for x in range(self.spatial_size):\n gather_ind = tf.expand_dims(gather_ind, 0)\n # dublicate the indices for every batch\n gather_ind = tf.tile(gather_ind,\n [input_shape[0]] + [1] * (self.spatial_size + 1))\n\n # extract the selected values from the input\n output = tf.gather_nd(self.input, gather_ind, batch_dims=1)\n # reshape the output to the correct shape calculated earlier\n output = tf.reshape(output, self.output_shape)\n\n return output", "def spatial_filter_nd(x: torch.Tensor, kernel: torch.Tensor, mode: str = 'replicate') -> torch.Tensor:\n\n n_dim = x.dim() - 2\n if n_dim <= 0 or n_dim > 3:\n raise AssertionError(f\"the spatial dims of input should be 1, 2 or 3, get{n_dim}\")\n conv = _func_conv_nd_table[n_dim]\n\n pad = [None, None] * n_dim\n pad[0::2] = kernel.shape[2:]\n pad[1::2] = kernel.shape[2:]\n pad = [k // 2 for k in pad]\n\n return conv(F.pad(x, pad=pad, mode=mode), kernel)", "def create_2d_circle_kernel(radius):\n return np.array([ np.sqrt( x * x + y * y ) <= float(radius) for y in xrange(-radius, radius+1) for x in xrange(-radius, radius+1)], dtype=np.float32).reshape( radius*2+1, radius*2+1 )", "def convNd_sparse(src, kernel, centroids):\n results = []\n kernel_radii = [w//2 for w in kernel.shape]\n for centroid in centroids:\n slc = tuple(\n slice(int(centroid[d] - kernel_radii[d]), int(centroid[d] + kernel_radii[d] + 1))\n for d in range(len(src.shape))\n )\n box = src[slc]\n results.append((box * kernel).sum())\n\n return array(results, dtype=src.dtype)", "def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):", "def convolve_1d(x, k):\n y=np.zeros_like(x)\n\n \"\"\"\n *******************************************\n *** TODO: write code to perform convolution\n *******************************************\n\n The output should be the same size as the input\n You can assume zero padding, and an odd-sized kernel\n \"\"\"\n \n #Retrieve sizes for 
loops\n vector_size = x.size\n kernel_size = k.size\n half_size = int((kernel_size-1)/2)\n \n #Create a temporary array that is zero-padded at the front and back for edges\n results=np.zeros(vector_size+(2*half_size))\n for i in range(0, vector_size):\n results[half_size+i] = x[i]\n \n #Swap the kernel\n j = kernel_size-1\n for i in range(0,half_size):\n temp = k[i]\n k[i] = k[j]\n k[j] = temp\n j -= 1\n\n #Calculate the convolution for each index and output into results\n for i in range(0,vector_size):\n accumulator = 0;\n for j in range(0,kernel_size):\n accumulator += (results[i+j]*k[j])\n y[i] = accumulator\n\n\n \"\"\"\n *******************************************\n \"\"\"\n\n return y", "def circular_kernel(radius):\n\n width = 2*radius + 1\n kernel = np.zeros((width, width), np.uint8)\n for i in range(0, width):\n for j in range(0, width):\n if (i - radius) ** 2 + (j - radius) ** 2 <= radius**2:\n kernel[i][j] = int(1)\n return kernel", "def convolution(self):\n\n\n filter_number = int((self.filter.shape[0] - 1)/2)\n\n result = []\n\n for y_index in range(0, self.input_data.shape[0] - self.filter.shape[0] + 1, self.strides[0]):\n result_x = []\n \n for x_index in range(0, self.input_data.shape[1]-self.filter.shape[1] + 1, self.strides[1]):\n input_matrix = self.input_data[y_index : y_index + self.filter.shape[0], x_index : x_index + self.filter.shape[1]]\n result_x.append(self.__cov_single(input_data = input_matrix))\n\n result.append(result_x)\n\n return np.array(result)", "def update_contour_integration_kernel(model, new_tgt_filt_idx):\n\n K.set_value(model.layers[2].tgt_filt_idx, new_tgt_filt_idx)\n K.set_value(model.layers[3].tgt_filt_idx, new_tgt_filt_idx)\n\n # remember to recompile the model\n return model", "def _conv2d_dynamic(image, dynamic_kernel,\n padding):\n sz = dynamic_kernel.shape\n\n patches = lax.conv_general_dilated_patches(\n image,\n sz[2:], (1, 1),\n padding,\n dimension_numbers=('NHWC', 'OIHW', 'NHWC'))\n\n return jnp.sum(\n jnp.multiply(patches,\n dynamic_kernel.reshape((1, sz[0], sz[1], sz[2] * sz[3]))),\n axis=3,\n keepdims=True)", "def xcorr_depthwise(x, kernel):\n batch = kernel.size(0)\n channel = kernel.size(1)\n x = x.view(1, batch * channel, x.size(2), x.size(3))\n kernel = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3))\n out = F.conv2d(x, kernel, groups=batch * channel)\n out = out.view(batch, channel, out.size(2), out.size(3))\n return out", "def Convolve(image, kernel):\r\n\r\n\t# parameter dimensions\r\n\t# image\r\n\t(imH, imW) = image.shape[:2]\r\n\t# kernel\r\n\t(knH, knW) = kernel.shape[:2]\r\n\r\n\t# padding for image border\r\n\tpad = (knW-1)//2\r\n\r\n\r\n\t# add border to image\r\n\t# https: // www.geeksforgeeks.org / python - opencv - cv2 - copymakeborder - method /\r\n\timage = cv2.copyMakeBorder(image, pad, pad, pad, pad, cv2.BORDER_REPLICATE)\r\n\r\n\t#empty matrix to store convolved image\r\n\tresult = np.zeros((imH, imH, 3), dtype=\"float32\")\r\n\t# print(result.shape)\r\n\r\n\t# iterate over image and apply kernel\r\n\tfor column in np.arange(pad, imH + pad):\r\n\t\tfor row in np.arange(pad, imW + pad):\r\n\t\t\tfor channel in range(3):\r\n\r\n\t\t\t\t# print(c, r, channel)\r\n\t\t\t\t# roi = region of interest; centered region about coordinates\r\n\t\t\t\troi = image[column - pad:column + pad + 1, row - pad:row + pad + 1, channel]\r\n\r\n\t\t\t\t# convolution\r\n\t\t\t\tconvxy = (roi * kernel).sum()\r\n\r\n\t\t\t\t# put convolved value in result\r\n\t\t\t\tresult[column - pad, row - pad, channel] = 
convxy\r\n\r\n\t# scale new image to be in typical rgb range\r\n\tresult = rescale_intensity(result, in_range=(0,255))\r\n\tresult = (result*255).astype(\"uint8\")\r\n\treturn result", "def convolve(self):\n # get the kernel windows\n stridded = self._get_windows()\n # set the output raster dimensions\n self.out_raster = np.empty(shape=(stridded.shape[0],\n stridded.shape[1]))\n # iterate over the available kernels and perform the actual convolution\n # using the selected statistical operator\n for ii in range(stridded.shape[0]):\n for jj in range(stridded.shape[1]):\n self.out_raster[ii, jj] = self.kernel.applyConvolution(\n values=stridded[ii,jj,:,:])", "def cyclic_conv1d_alt(input_node, filter_):\n c = int(input_node.shape[2])\n kernel_node = filter_.coeffs\n\n N = int(input_node.shape[1])\n\n start = N - filter_.num_neg()\n end = filter_.num_pos() - 1\n\n # Perodically extend input signal\n input_new = tf.concat(\n (input_node[:, start:, :], input_node, input_node[:, 0:end, :]),\n axis=1\n )\n\n # Convolve with periodic extension\n result = tf.nn.conv1d(input_new, kernel_node[::-1], stride=1, padding=\"VALID\")\n\n return result", "def convolution_with_numpy(x: np.ndarray, W: np.ndarray, stride: int = 1, pad: int = 0) \\\n -> np.ndarray:\n n, c_i, h_i, w_i = x.shape\n c_o, c_i, h_k, w_k = W.shape\n\n if h_k > h_i or w_k > w_i:\n raise AssertionError('The height and width of x must be smaller than W')\n\n if stride > (h_i - h_k + 2 * pad + 1) or stride > (w_i - w_k + 2 * pad + 1):\n raise AssertionError('The value of stride must be smaller than output tensor size')\n\n h_o = math.floor((h_i - h_k + 2 * pad) / float(stride)) + 1\n w_o = math.floor((w_i - w_k + 2 * pad) / float(stride)) + 1\n\n if pad > 0:\n new_x = np.zeros((n, c_i, h_i + 2 * pad, w_i + 2 * pad), dtype=np.float32)\n for nn in range(n):\n for cc_i in range(c_i):\n new_x[nn][cc_i] = np.pad(x[nn][cc_i], pad, 'constant')\n x = new_x\n\n result = np.zeros((n, c_o, h_o, w_o), dtype=np.float32)\n\n for nn in range(n):\n for cc_i in range(c_i):\n for cc_o in range(c_o):\n for h in range(h_o):\n for w in range(w_o):\n for k_h in range(h_k):\n for k_w in range(w_k):\n result[nn, cc_o, h, w] += \\\n x[nn][cc_i][h * stride + k_h][w * stride + k_w] * \\\n W[cc_o][cc_i][k_h][k_w]\n\n return result", "def filter2d(\n x: torch.Tensor,\n kernel: torch.Tensor,\n padding: Union[int, Tuple[int, int]] = 0,\n) -> torch.Tensor:\n\n return F.conv2d(x, kernel, padding=padding, groups=x.size(1))", "def cut_neurons(x, y, states, max_cos, state_threshold):\n\n states_copy = states.copy()\n num_hits = len(states_copy)\n\n # Distances\n dist = numpy.zeros((num_hits, num_hits))\n\n for i in range(num_hits):\n\n for j in range(num_hits):\n r = numpy.sqrt((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2)\n dist[i, j] = r\n\n for i in range(num_hits):\n\n for j in range(num_hits):\n\n min_k = -1\n min_cos = 2\n\n for k in range(num_hits):\n\n if i == j or i == k or j == k:\n continue\n\n if states_copy[i, j] > state_threshold and states_copy[j, k] > state_threshold:\n\n scalar_prod = (x[i] - x[j]) * (x[k] - x[j]) + (y[i] - y[j]) * (y[k] - y[j])\n cos = scalar_prod / (dist[i, j] * dist[j, k])\n\n if cos < min_cos:\n\n if min_k != -1:\n states_copy[j, min_k] = states_copy[min_k, j] = 0\n # states_copy[j, min_k] = 0\n\n min_k = k\n min_cos = cos\n\n else:\n\n states_copy[j, k] = states_copy[k, j] = 0\n # states_copy[j, k] = 0\n\n if min_k != -1 and min_cos >= max_cos:\n states_copy[j, min_k] = states_copy[min_k, j] = 0\n # states_copy[j, min_k] = 0\n\n return 
states_copy", "def _calc_input_ind(self, output_ind, kernel, dilation, stride):\n return (output_ind // kernel) * (stride - kernel * dilation) + \\\n output_ind * dilation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the labels of each grid on the pygame screen.
def display_grid_labels() -> None: board1_label = label_font.render('Ship Board', False, (255, 255, 255)) board2_label = label_font.render('Firing Board', False, (255, 255, 255)) escape = instruction_font.render('HIT ESC TO RETURN TO THE MAIN MENU OR TO RESET THE GAME', False, (255, 255, 255)) columns = 'ABCDEFGH' rows = '12345678' # Label Player 1 Board for letter in range(0, 8): label = label_font.render(columns[letter], False, (255, 255, 255)) screen.blit(label, (205 + letter * 50, 125)) for number in range(0, 8): label = label_font.render(rows[number], False, (255, 255, 255)) screen.blit(label, (165, 170 + number * 50)) # Label Player 2 Board for letter in range(0, 8): label = label_font.render(columns[letter], False, (255, 255, 255)) screen.blit(label, (705 + letter * 50, 125)) for number in range(0, 8): label = label_font.render(rows[number], False, (255, 255, 255)) screen.blit(label, (665, 170 + number * 50)) screen.blit(board1_label, (320, 580)) screen.blit(board2_label, (800, 580)) screen.blit(escape, (25, 685))
[ "def display_lab(self):\n\n x = 0\n for row in self.config:\n y = 0\n for column in row:\n if column == 'm':\n self.screen.blit(self.wall, (x*20, y*20),\n (100, 0, 20, 20))\n if column == 'x':\n self.screen.blit(self.wall, (x*20, y*20),\n (380, 0, 20, 20))\n if column == 'D':\n self.screen.blit(self.wall, (x*20, y*20),\n (160, 20, 20, 20))\n if column == 'A':\n self.screen.blit(self.wall, (x*20, y*20),\n (160, 20, 20, 20))\n y += 1\n x += 1", "def display_grid():\n\n print(f\"{grid[0]} {grid[1]} {grid[2]}\")\n print(f\"{grid[3]} {grid[4]} {grid[5]}\")\n print(f\"{grid[6]} {grid[7]} {grid[8]}\")", "def show_grid(self):\n print grid_text(self.grid)\n print \"\"", "def show(self):\n for y in range(4):\n for x in range(4):\n current_number = self.game.field[y][x]\n colours = self.get_colours(current_number)\n self.field[y][x].config(fg=colours[1], bg=colours[2])\n if current_number:\n self.field[y][x][\"text\"] = current_number\n else:\n self.field[y][x][\"text\"] = \"\"\n\n self.labelScore[\"text\"] = \"Score:\\n\" + str(self.game.score)\n\n is_finished = self.game.is_finished()\n if is_finished:\n for element in [self.root] + self.listLabels:\n element.config(bg=self.bgEndOfGame)\n else:\n for element in [self.root] + self.listLabels:\n element.config(bg=self.bg)\n\n self.root.update()", "def draw_labels(self, screen):\n font = pygame.font.SysFont('Arial', self.font_size)\n\n for i, label in enumerate(self.source_labels):\n if self.source_state == i:\n bgcol = (0, 0, 255)\n else:\n bgcol = (0, 0, 0)\n text_surface = font.render(label, True, (255, 255, 255, 255), bgcol)\n textrect = text_surface.get_rect()\n textrect.centerx = self.source_button_rects[i].x + self.source_button_width/2\n textrect.centery = self.source_button_rects[i].y + self.source_button_height/2\n\n screen.blit(text_surface, textrect)\n\n for i, label in enumerate(self.sync_labels):\n if self.sync_state == i:\n bgcol = (0, 255, 0)\n else:\n bgcol = (0, 0, 0)\n text_surface = font.render(label, True, (255, 255, 255, 255), bgcol)\n textrect = text_surface.get_rect()\n textrect.centerx = self.sync_button_rects[i].x + self.sync_button_width/2\n textrect.centery = self.sync_button_rects[i].y + self.sync_button_height/2\n\n screen.blit(text_surface, textrect)", "def print_grid(self):\n\n print('\\n'.join([' '.join(it) for it in self.game_grid]))", "def draw_name():\n dist_from_top = 35\n label1 = pyglet.text.Label(\"Chess\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label2 = pyglet.text.Label(\"II\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top - 20,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label1.draw()\n label2.draw()", "def display_map():\n for row in range(self.height):\n for col in range(self.width):\n surface = self.TileTexture[self.map1[row][col]]\n rect = surface.get_rect(topleft=(col * self.tilesize, row * self.tilesize))\n self.screen.blit(surface, rect)", "def print_grid(self):\n for i in range(0,6):\n print('[%s]' % ' , '.join(map(str,self.grid_row[i])))", "def display(self):\n\n print(self.grid[0], '\\n', self.grid[1], '\\n', self.grid[2], '\\n',\n self.grid[3], '\\n', f'Your score is {self.score}')\n # print(self.grid[1])\n # print(self.grid[2])\n # print(self.grid[3])\n # print(f'Your score is {self.score}')", "def show_player_titles(self):\n font = pg.font.SysFont(\"SFNS Display\", 50)\n ai_text = font.render(\"AI\", True, 
(255, 255, 255), (96,) * 3)\n pl_text = font.render(\"Player\", True, (255, 255, 255), (96,) * 3)\n ai_textrect = ai_text.get_rect()\n pl_textrect = ai_text.get_rect()\n ai_textrect.x = self.BLOCK + (self.SCREEN_WIDTH / 2) * self.BLOCK\n pl_textrect.x = 2 * self.BLOCK + (self.SCREEN_WIDTH * 1.5) * self.BLOCK\n ai_textrect.y = 1\n pl_textrect.y = 1\n self.display.blit(ai_text, ai_textrect)\n self.display.blit(pl_text, pl_textrect)\n\n # pg.display.update()", "def draw_grid(self):\r\n\r\n for x in range(0, FULLSIZE[0], CELLSIZE):\r\n pygame.draw.line(self.screen, GRAY, (x, 0), (x, FULLSIZE[0]))\r\n for y in range(0, FULLSIZE[1], CELLSIZE):\r\n pygame.draw.line(self.screen, GRAY, (0, y), (FULLSIZE[0], y))\r\n\r\n for x in range(0, FULLSIZE[0], CUBESIZE):\r\n pygame.draw.line(self.screen, BLACK, (x, 0), (x, FULLSIZE[0]), 2)\r\n for y in range(0, FULLSIZE[1], CUBESIZE):\r\n pygame.draw.line(self.screen, BLACK, (0, y), (FULLSIZE[0], y), 2)", "def grid(self):\n # Blank the grid\n for y in range(1, 21):\n self.addstr(y, 1, \" \" * 10)\n # Draw the new grid\n for x, column in enumerate(self.game.grid):\n for y, color in enumerate(column):\n y -= self.game.grid.top_buffer\n if y >= 0:\n self.pixel(x, y, color)\n\n # Finally refresh the screen\n self.refresh()", "def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)", "def print_board():\n \n print \"\"\n print \" | | \"\n print \" \" + grid_status[(1,1)] + \" | \" + grid_status[(1,2)] + \" | \" + grid_status[(1,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(2,1)] + \" | \" + grid_status[(2,2)] + \" | \" + grid_status[(2,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(3,1)] + \" | \" + grid_status[(3,2)] + \" | \" + grid_status[(3,3)]\n print \" | | \"\n print \"\"", "def view_game(participants):\n master = Tk()\n master.title(\"Current Game\")\n master.tk_setPalette(background='white', foreground='black', activeBackground='black',\n activeForeground='white')\n\n # Creating the base frame\n frame_base = Frame(master)\n frame_base.pack()\n\n # Label list of names displayed on window\n name_labels = []\n champ_labels = []\n grid_counter = [0, 0]\n\n # For each participant...\n for participant in participants:\n\n # Display the participant + champion on the left side IF they are on blue team\n if participant.team_id == 100:\n\n # Name\n name_labels.append(Label(frame_base, text=participant.name, fg='blue'))\n name_labels[-1].grid(row=grid_counter[0], column=0)\n\n # Champion\n champ_labels.append(Label(frame_base, text=participant.current_champion.name))\n champ_labels[-1].grid(row=grid_counter[0], column=1)\n\n # Increment counter\n grid_counter[0] += 1\n\n # Display the participant on the right side IF they are on red team\n elif participant.team_id == 200:\n\n # Name\n name_labels.append(Label(frame_base, text=participant.name, fg='red'))\n 
name_labels[-1].grid(row=grid_counter[1], column=3)\n\n # Champion\n champ_labels.append(Label(frame_base, text=participant.current_champion.name))\n champ_labels[-1].grid(row=grid_counter[1], column=2)\n\n # Increment counter\n grid_counter[1] += 1\n\n # Loop it!\n master.mainloop()", "def draw(win,grid,rows,width):\n win.fill(WHITE)\n for row in grid:\n for node in row:\n node.draw(win)\n draw_grid(win,rows,width)\n pygame.display.update()", "def display_graphics(self):\n\n # Blit the background\n self.dis.blit(statistics_menu, (0, 0))\n\n # Blit the leaderboard\n self.dis.blit(self.get_leaderboard(), (DISPLAY_X / 2 - self.lb_image_width / 2, self.leaderboard_y))\n\n # Set bold to True for this font (temporarily)\n bahnschrift_font_small.set_bold(True)\n\n # Blit the header items\n self.dis.blit(bahnschrift_font_small.render(\"Position\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2), self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"XP\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 150, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Level\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 300, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Bases\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 450, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Time\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 600, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Date\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 750, self.leaderboard_y - self.lb_header_offset))\n\n # Set bold to False for this font\n bahnschrift_font_small.set_bold(False)\n\n # Blit the button onto the display\n self.dis.blit(self.get_button(), (self.button_x, self.button_y))", "def turn_display(self):\n myfont = pygame.font.SysFont(\"arial\", 48)\n turndisp = myfont.render(\"Player %s's Turn\"%(self.model.turn%len(self.model.teams)+1), 1, (0,0,0))\n self.screen.blit(turndisp,(10,10))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display all the ships on the game board
def display_ships_hidden(game: game_code.BattleshipGame, player_1: bool) -> None: for cell_number in range(0, 8): for cell_letter in range(0, 8): piece = game.get_board()[cell_number][cell_letter] if piece is not None: cell = game_visualize.index_to_algebraic((cell_number, cell_letter)) display_piece(player_1, cell, piece.kind)
[ "def get_ships(game):\n return game.me.get_ships()", "def print_grid(self):\n\n print('\\n'.join([' '.join(it) for it in self.game_grid]))", "def __printHeatMap__(ships, p2board, mode, diff):\n if mode == \"search\":\n search(diff, ships, p2board)\n else:\n target(ships)\n a = AIBoardHeatMap\n plt.imshow(a, cmap='binary')\n plt.show()", "def display_board(self):\n board = self.get_board()\n\n # print the column headers\n print(' a b c d e f g h i')\n\n # print the board, one row at a time, by first making a list for the row\n for row in range(1, 11):\n\n # start row with number\n row_list = [str(row)]\n\n # extra space after single digits so they match the '10 '\n if row < 10:\n row_list.append('')\n\n # fill the row_list\n for col in 'abcdefghi':\n position = col + str(row)\n row_list.append(board[position])\n\n # display piece_ids in row_list separated by spaces\n print(*row_list)\n\n # finish with blank line\n print()", "def showShop(self):\n # idea di base: lista degli oggetti in objs con relativi prezzi\n out = []\n for elem in self.shop:\n out.append(elem)", "def place_ship_on_gameboard(self):\n\t\tfor i in range(self.length):\n\t\t\tself.Gameboard.invisibleGameboard[self._shipCoordinatesY[i]][self._shipCoordinatesX[i]] = \"X\"", "def show_table(game):\n x, y = properties.SCREEN_WIDTH / 2, properties.SCREEN_HEIGHT / 2\n for card in game.table[-4:]:\n card_image = card.image\n rect = card_image.get_rect()\n rect.center = (x, y)\n SCREEN.blit(card_image, rect)\n x += 30", "def ShowMap(self):\n for j in reversed(range(self.width+border)):\n \n for i in range(self.length+border):\n if((self.boardChar[i][j].type == \"null\" and\n self.boardItem[i][j].type == \"null\")\n or self.boardChar[i][j].type == \"wall\"):\n print(self.boardFloor[i][j].symbol, end=\" \")\n \n elif(self.boardChar[i][j].type == \"null\" and \n self.boardItem[i][j].type != \"null\"):\n print(self.boardItem[i][j].symbol, end=\" \")\n \n else:\n print(self.boardChar[i][j].symbol, end=\" \")\n \n print(\"\")", "def placeShips(self):\r\n playerInfo = self.playerInfo\r\n if playerInfo.readyToPlay:\r\n return\r\n boatIndex = len(playerInfo.boats)\r\n\r\n #Verification to check if we are ready to play\r\n if len(self.model.BOAT_SIZES) == boatIndex:\r\n print('PLAYER {} READY TO PLAY'.format(self.playerInfo.playerName))\r\n self.gridInteractions.setActionGrid(\r\n playerInfo.OPPONENT_GRID)\r\n playerInfo.readyToPlay = True\r\n return\r\n\r\n playerGrid = self.playerGrid\r\n grid_length = self.model.GRID_SIZE\r\n\r\n boat_length = self.model.BOAT_SIZES[boatIndex]\r\n\r\n for i in range(grid_length):\r\n match = self.getBoatFromSequence(playerGrid.getLine(i), boat_length)\r\n if match:\r\n y = match\r\n self.addBoat(self.rowColToNPCoord([i], match))\r\n self.__clearPendingCells()\r\n match = self.getBoatFromSequence(playerGrid.getColumn(i), boat_length)\r\n if match:\r\n self.addBoat(self.rowColToNPCoord(match, [i]))\r\n self.__clearPendingCells()", "def display_board(board):\n \n for i in range(len(board)): # Finds in the board array the cards in order to push them in the print_card function \n for card in board[i]:\n print(print_card(card),end=\"\") \n print(\"\")", "def display_map():\n for row in range(self.height):\n for col in range(self.width):\n surface = self.TileTexture[self.map1[row][col]]\n rect = surface.get_rect(topleft=(col * self.tilesize, row * self.tilesize))\n self.screen.blit(surface, rect)", "def displayBoard():\n return render_template('board.html',\n clubs=clubs)", "def display_ship_placement(click: 
bool, length: int, orientation: bool, color: Tuple[int, int, int],\r\n ship_type: str) -> None:\r\n\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n global user_game_board, ships_on_board\r\n # check mouse position based on a horizontal ship orientation\r\n if orientation:\r\n # check if the mouse position is within the grid and withing the length of the ship\r\n if 190 <= mouse_x <= 189 + (9 - length) * 50 and 160 <= mouse_y <= 560:\r\n pos = convert_mouse_to_display_pos(mouse_x, mouse_y, True)\r\n cell = convert_mouse_to_letternum(mouse_x, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the top left corner of the grid\r\n if mouse_y < 160 and mouse_x < 190:\r\n pos = convert_mouse_to_display_pos(200, 170, True)\r\n cell = convert_mouse_to_letternum(200, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom left corner of the grid\r\n if mouse_y > 560 and mouse_x < 190:\r\n pos = convert_mouse_to_display_pos(200, 550, True)\r\n cell = convert_mouse_to_letternum(200, 550, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom right boundary for the ship length\r\n if mouse_y > 560 and mouse_x > 189 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, 540, True)\r\n cell = convert_mouse_to_letternum(189 + (9 - length) * 50, 540, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the top right boundary for the ship length\r\n if mouse_y < 160 and mouse_x > 189 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, 170, True)\r\n cell = convert_mouse_to_letternum(189 + (9 - length) * 50, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 
1\r\n return None\r\n\r\n # check if the mouse is past the top boundary\r\n if 190 <= mouse_x <= 189 + (9 - length) * 50 and mouse_y < 160:\r\n pos = convert_mouse_to_display_pos(mouse_x, 170, True)\r\n cell = convert_mouse_to_letternum(mouse_x, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom boundary\r\n if 190 <= mouse_x <= 189 + (9 - length) * 50 and mouse_y > 560:\r\n pos = convert_mouse_to_display_pos(mouse_x, 540, True)\r\n cell = convert_mouse_to_letternum(mouse_x, 540, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the far right boundary based on ship length\r\n if mouse_x > 189 + (9 - length) * 50 and 160 <= mouse_y <= 560:\r\n pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, mouse_y, True)\r\n cell = convert_mouse_to_letternum(189 + (9 - length) * 50, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the far left boundary\r\n if mouse_x < 190 and 160 <= mouse_y <= 560:\r\n pos = convert_mouse_to_display_pos(200, mouse_y, True)\r\n cell = convert_mouse_to_letternum(200, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)\r\n check_all.append(user_game_board[y][x + i] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n # Check the boundaries if the ship is in the vertical position\r\n else:\r\n # check if the mouse is within the grid boundaries for a vertical ship\r\n if 190 <= mouse_x <= 590 and 160 <= mouse_y <= 159 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(mouse_x, mouse_y, True)\r\n cell = convert_mouse_to_letternum(mouse_x, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n # check if the mouse is past the top left corner of the grid\r\n if mouse_y < 160 and mouse_x < 190:\r\n pos = convert_mouse_to_display_pos(200, 170, True)\r\n cell = 
convert_mouse_to_letternum(200, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom left corner of the grid based on ship length\r\n if mouse_y > 160 + (9 - length) * 50 and mouse_x < 190:\r\n pos = convert_mouse_to_display_pos(200, 159 + (9 - length) * 50, True)\r\n cell = convert_mouse_to_letternum(200, 159 + (9 - length) * 50, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x + index] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom right corner of the grid based on ship length\r\n if mouse_y > 160 + (9 - length) * 50 and mouse_x > 590:\r\n pos = convert_mouse_to_display_pos(580, 159 + (9 - length) * 50, True)\r\n cell = convert_mouse_to_letternum(580, 159 + (9 - length) * 50, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the top right corner of the grid\r\n if mouse_y < 160 and mouse_x > 590:\r\n pos = convert_mouse_to_display_pos(580, 170, True)\r\n cell = convert_mouse_to_letternum(580, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the top of the grid\r\n if 190 <= mouse_x <= 590 and mouse_y < 160:\r\n pos = convert_mouse_to_display_pos(mouse_x, 170, True)\r\n cell = convert_mouse_to_letternum(mouse_x, 170, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the bottom of the grid based on ship length\r\n if 190 <= mouse_x <= 590 and mouse_y > 160 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(mouse_x, 158 + (9 - length) * 50, True)\r\n cell = convert_mouse_to_letternum(mouse_x, 158 + (9 - length) * 50, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 
25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the right side of the grid\r\n if mouse_x > 590 and 160 <= mouse_y <= 159 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(580, mouse_y, True)\r\n cell = convert_mouse_to_letternum(580, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + i][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None\r\n\r\n # check if the mouse is past the left side of the grid\r\n if mouse_x < 190 and 160 <= mouse_y <= 160 + (9 - length) * 50:\r\n pos = convert_mouse_to_display_pos(200, mouse_y, True)\r\n cell = convert_mouse_to_letternum(200, mouse_y, True)\r\n y, x = game_visualize.algebraic_to_index(cell)\r\n check_all = []\r\n for i in range(0, length):\r\n pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)\r\n check_all.append(user_game_board[y + 1][x] is None)\r\n if click and all(check_all):\r\n for index in range(0, length):\r\n user_game_board[y + index][x] = game_code.Piece(ship_type)\r\n ships_on_board += 1\r\n return None", "def showChips(self):\n print(\"You currently have %d chips\" % self.chips)", "def display_revealed_puzzle(self):\n for i in self.revealed_puzzle:\n print(i, end=\" \")\n print(\"\")", "def print_grid(self):\n for i in range(0,6):\n print('[%s]' % ' , '.join(map(str,self.grid_row[i])))", "def show_inventory(self):\n for i in self.inventory:\n self.show_car(i)", "def draw_pieces(self):\n for i in range(8):\n for j in range(8):\n if self.get_board_array()[i, j].get_content() is not None:\n self.screen.blit(\n self.get_board_array()[i, j].get_content().get_visual(),\n (int(j * self.h / 8), int(i * self.h / 8))\n )", "def print_board(self):\n for cell in self.board:\n print(\"current step: {}, ladder top: {}, snake_tail: {}\".\n format(cell.current_step, cell.ladder_top, cell.snake_tail))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the ship based on the user's mouse position, taking into account that the user's mouse might not be on the grid, in which case the ship is displayed as far as it can possibly go while staying on the grid. While the amount of lines may look intimidating, there are simply a lot of possibilities for where the user's mouse can be relative to the grid.
def display_ship_placement(click: bool, length: int, orientation: bool, color: Tuple[int, int, int], ship_type: str) -> None:
    mouse_x, mouse_y = pygame.mouse.get_pos()
    global user_game_board, ships_on_board
    # check mouse position based on a horizontal ship orientation
    if orientation:
        # check if the mouse position is within the grid and within the length of the ship
        if 190 <= mouse_x <= 189 + (9 - length) * 50 and 160 <= mouse_y <= 560:
            pos = convert_mouse_to_display_pos(mouse_x, mouse_y, True)
            cell = convert_mouse_to_letternum(mouse_x, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top left corner of the grid
        if mouse_y < 160 and mouse_x < 190:
            pos = convert_mouse_to_display_pos(200, 170, True)
            cell = convert_mouse_to_letternum(200, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom left corner of the grid
        if mouse_y > 560 and mouse_x < 190:
            pos = convert_mouse_to_display_pos(200, 550, True)
            cell = convert_mouse_to_letternum(200, 550, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom right boundary for the ship length
        if mouse_y > 560 and mouse_x > 189 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, 540, True)
            cell = convert_mouse_to_letternum(189 + (9 - length) * 50, 540, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top right boundary for the ship length
        if mouse_y < 160 and mouse_x > 189 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, 170, True)
            cell = convert_mouse_to_letternum(189 + (9 - length) * 50, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top boundary
        if 190 <= mouse_x <= 189 + (9 - length) * 50 and mouse_y < 160:
            pos = convert_mouse_to_display_pos(mouse_x, 170, True)
            cell = convert_mouse_to_letternum(mouse_x, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom boundary
        if 190 <= mouse_x <= 189 + (9 - length) * 50 and mouse_y > 560:
            pos = convert_mouse_to_display_pos(mouse_x, 540, True)
            cell = convert_mouse_to_letternum(mouse_x, 540, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the far right boundary based on ship length
        if mouse_x > 189 + (9 - length) * 50 and 160 <= mouse_y <= 560:
            pos = convert_mouse_to_display_pos(189 + (9 - length) * 50, mouse_y, True)
            cell = convert_mouse_to_letternum(189 + (9 - length) * 50, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the far left boundary
        if mouse_x < 190 and 160 <= mouse_y <= 560:
            pos = convert_mouse_to_display_pos(200, mouse_y, True)
            cell = convert_mouse_to_letternum(200, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0] + i * 50, pos[1]), 25)
                check_all.append(user_game_board[y][x + i] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y][x + index] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

    # Check the boundaries if the ship is in the vertical position
    else:
        # check if the mouse is within the grid boundaries for a vertical ship
        if 190 <= mouse_x <= 590 and 160 <= mouse_y <= 159 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(mouse_x, mouse_y, True)
            cell = convert_mouse_to_letternum(mouse_x, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top left corner of the grid
        if mouse_y < 160 and mouse_x < 190:
            pos = convert_mouse_to_display_pos(200, 170, True)
            cell = convert_mouse_to_letternum(200, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom left corner of the grid based on ship length
        if mouse_y > 160 + (9 - length) * 50 and mouse_x < 190:
            pos = convert_mouse_to_display_pos(200, 159 + (9 - length) * 50, True)
            cell = convert_mouse_to_letternum(200, 159 + (9 - length) * 50, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom right corner of the grid based on ship length
        if mouse_y > 160 + (9 - length) * 50 and mouse_x > 590:
            pos = convert_mouse_to_display_pos(580, 159 + (9 - length) * 50, True)
            cell = convert_mouse_to_letternum(580, 159 + (9 - length) * 50, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top right corner of the grid
        if mouse_y < 160 and mouse_x > 590:
            pos = convert_mouse_to_display_pos(580, 170, True)
            cell = convert_mouse_to_letternum(580, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the top of the grid
        if 190 <= mouse_x <= 590 and mouse_y < 160:
            pos = convert_mouse_to_display_pos(mouse_x, 170, True)
            cell = convert_mouse_to_letternum(mouse_x, 170, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the bottom of the grid based on ship length
        if 190 <= mouse_x <= 590 and mouse_y > 160 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(mouse_x, 158 + (9 - length) * 50, True)
            cell = convert_mouse_to_letternum(mouse_x, 158 + (9 - length) * 50, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the right side of the grid
        if mouse_x > 590 and 160 <= mouse_y <= 159 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(580, mouse_y, True)
            cell = convert_mouse_to_letternum(580, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None

        # check if the mouse is past the left side of the grid
        if mouse_x < 190 and 160 <= mouse_y <= 160 + (9 - length) * 50:
            pos = convert_mouse_to_display_pos(200, mouse_y, True)
            cell = convert_mouse_to_letternum(200, mouse_y, True)
            y, x = game_visualize.algebraic_to_index(cell)
            check_all = []
            for i in range(0, length):
                pygame.draw.circle(screen, color, (pos[0], pos[1] + i * 50), 25)
                check_all.append(user_game_board[y + i][x] is None)
            if click and all(check_all):
                for index in range(0, length):
                    user_game_board[y + index][x] = game_code.Piece(ship_type)
                ships_on_board += 1
            return None
[ "def _draw_ship(self):\n self.__screen.draw_ship(*self.__spaceship.get_draw_data())", "def mouse_moved(self, pos_x, pos_y):\n self.emit(\"mouseMoved\", pos_x, pos_y)\n self.mouse_position[0] = pos_x\n self.mouse_position[1] = pos_y\n if self.in_centring_state:\n self.graphics_centring_lines_item.set_start_position(pos_x, pos_y)\n elif self.in_grid_drawing_state:\n if self.graphics_grid_draw_item.is_draw_mode():\n self.graphics_grid_draw_item.set_draw_end_position(pos_x, pos_y)\n elif self.in_measure_distance_state:\n self.graphics_measure_distance_item.set_coord(self.mouse_position)\n elif self.in_measure_angle_state:\n self.graphics_measure_angle_item.set_coord(self.mouse_position)\n elif self.in_measure_area_state:\n self.graphics_measure_area_item.set_coord(self.mouse_position)\n elif self.in_move_beam_mark_state:\n self.graphics_move_beam_mark_item.set_end_position(\\\n self.mouse_position[0], self.mouse_position[1])\n elif self.in_beam_define_state:\n self.graphics_beam_define_item.set_end_position(\\\n self.mouse_position[0], self.mouse_position[1])\n elif self.in_select_items_state:\n \n self.graphics_select_tool_item.set_end_position(pos_x, pos_y)\n select_start_x = self.graphics_select_tool_item.start_coord[0]\n select_start_y = self.graphics_select_tool_item.start_coord[1]\n if abs(select_start_x - pos_x) > 5 and \\\n abs(select_start_y - pos_y) > 5:\n painter_path = QPainterPath()\n painter_path.addRect(min(select_start_x, pos_x),\n min(select_start_y, pos_y),\n abs(select_start_x - pos_x),\n abs(select_start_y - pos_y))\n self.graphics_view.graphics_scene.setSelectionArea(painter_path)\n \"\"\"\n for point in self.get_points():\n if point.isSelected():\n self.emit(\"pointSelected\", point)\n self.select_lines_and_grids()\n \"\"\"\n elif self.in_magnification_mode:\n self.graphics_magnification_item.set_end_position(pos_x, pos_y)", "def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Get rid of any remaining aliens and lasers.\n self.aliens.empty()\n self.lasers.empty()\n\n # Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n # Pause.\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)", "def place_ship_on_gameboard(self):\n\t\tfor i in range(self.length):\n\t\t\tself.Gameboard.invisibleGameboard[self._shipCoordinatesY[i]][self._shipCoordinatesX[i]] = \"X\"", "def mouse_press_cell(self):\r\n pos = pygame.mouse.get_pos()\r\n pygame.draw.rect(self.screen, WHITE, (floor(pos[0] / CELLSIZE) * CELLSIZE + 0.1 * CELLSIZE,\r\n floor(pos[1] / CELLSIZE) * CELLSIZE + 0.1 * CELLSIZE,\r\n CELLSIZE - 0.1 * CELLSIZE, CELLSIZE - 0.1 * CELLSIZE))\r\n\r\n x, y = floor(pos[1] / CELLSIZE), floor(pos[0] / CELLSIZE)\r\n\r\n return x, y", "def _ship_hit(self):\n\t\tif self.game_stats.ships_left > 0:\n\t\t\t# Reduce ship lives\n\t\t\tself.game_stats.ships_left -= 1\n\t\t\tself.scoreboard.prep_ships()\n\n\t\t\t# Get rid of aliens and remaining bullets\n\t\t\tself.aliens.empty()\n\t\t\tself.bullets.empty()\n\n\t\t\t# Create a new fleet\n\t\t\tself._create_fleet()\n\t\t\tself.ship.center_ship()\n\n\t\t\t# pause game\n\t\t\tsleep(0.5)\n\t\telse:\n\t\t\tself.game_stats.game_active = False\n\t\t\tpygame.mouse.set_visible(True)", "def spaceShip(x,y):\n gameDisplay.blit(spaceshipImg, (x,y))", "def mouse_released(self, pos_x, pos_y):\n if self.in_grid_drawing_state:\n QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))\n 
self.update_grid_motor_positions(self.graphics_grid_draw_item)\n self.graphics_grid_draw_item.set_draw_mode(False)\n self.wait_grid_drawing_click = False\n self.in_grid_drawing_state = False\n self.de_select_all()\n self.emit(\"shapeCreated\", self.graphics_grid_draw_item, \"Grid\")\n self.graphics_grid_draw_item.setSelected(True) \n self.shape_dict[self.graphics_grid_draw_item.get_display_name()] = \\\n self.graphics_grid_draw_item\n elif self.in_beam_define_state:\n self.stop_beam_define()\n elif self.in_select_items_state:\n self.graphics_select_tool_item.hide()\n self.in_select_items_state = False\n \"\"\"\n for point in self.get_points():\n if point.isSelected():\n self.emit(\"pointSelected\", point)\n \"\"\"\n self.select_lines_and_grids()", "def mouse_visible(self,x):\r\n\t\tpygame.mouse.set_visible(x)", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def visible(self, x, y):\n if x >= 0 and x <= 2 and y >= 3:\n return False\n if x >= 6 and x <= 8 and y >= 3:\n return False\n if x == 9:\n return False\n if x >= 13 and x <= 15 and y >= 3 and y <= 6:\n return False\n if x >= 13 and x <= 15 and y >= 10 and y <= 13:\n return False\n if (x == 18 or x == 20) and (y == 0 or y == 16):\n return False\n if (x == 18 and y == 8):\n return False\n if (x == 19):\n return False\n if (x >= 23 and x <= 25) and (y >= 3 and y <= 13):\n return False\n if (x >= 26 and x <= 28 and (y >= 3 and y <= 6)):\n return False\n if (x == 29):\n return False\n if x >= 33 and x <= 35 and y >= 3 and y <= 6:\n return False\n if x >= 33 and x <= 35 and y >= 10 and y <= 13:\n return False\n if x == 38 and (y == 0 or y == 8 or y == 16):\n return False\n return True", "def player_handle_move(self) -> None:\r\n mouse_pos = pg.mouse.get_pos()\r\n mouse_click = pg.mouse.get_pressed()\r\n\r\n for i in range(1, self.size + 1):\r\n x = i * self.gap_size + (i - 1) * self.box_size\r\n for j in range(1, self.size + 1):\r\n y = j * self.gap_size + (j - 1) * self.box_size\r\n if x < mouse_pos[0] < x + self.box_size and y < mouse_pos[1] < y + self.box_size and self.tags[i-1][j-1] is None:\r\n displayWindow.blit(self.cross, (x, y))\r\n\r\n if mouse_click[0] == 1:\r\n self.tags[i-1][j-1] = 'x'\r\n self.player_move_in_progress = False", "def draw_grid(play_area):\n for x in range(0, PLAY_AREA_WIDTH, GRID_SIZE):\n pygame.draw.line(play_area, GRID_COLOR,\n (x, 0), (x, PLAY_AREA_HEIGHT), 1)\n for y in range(0, PLAY_AREA_HEIGHT, GRID_SIZE):\n pygame.draw.line(play_area, GRID_COLOR,\n (0, y), (PLAY_AREA_WIDTH, y), 1)", "def _ship_hit(self):\n # livews are still remaining\n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard.\n self.stats.ships_left -= 1 # decrement number of lilves remaining\n self.sb.prep_ships() # Show how many ships are left.\n \n # Get rid of any remaining aliens and bullets.\n self.aliens.empty() # remove remaining aliens\n self.bullets.empty() # remove remaining bullets\n \n # Create a new fleet and center the ship.\n self._create_fleet() # create a fleet of Instances of alien objects\n self.ship.center_ship() # Center the ship on the screen\n \n # Pause.\n sleep(0.5) # sleep for half a second\n else: # no lives remaining\n self.stats.game_active = False # set game inactive\n pygame.mouse.set_visible(True) # set mouse pointer to visible", "def display_ships_hidden(game: game_code.BattleshipGame, player_1: bool) -> None:\r\n for cell_number in range(0, 8):\r\n for cell_letter in range(0, 8):\r\n piece = game.get_board()[cell_number][cell_letter]\r\n if piece is not 
None:\r\n cell = game_visualize.index_to_algebraic((cell_number, cell_letter))\r\n display_piece(player_1, cell, piece.kind)", "def set_as_ship(self):\n self.is_ship = True", "def mouse_handler(self,events):\n\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousedown = True\n self.mousebutton = event.button\n elif event.type == pygame.MOUSEBUTTONUP:\n self.mousedown = False\n self.mousebutton = event.button\n self.mouseX, self.mouseY = pygame.mouse.get_pos()\n\n #manage tool events\n if self.draw_tool == \"Line\":\n self.draw_line_template()\n if self.draw_tool == \"Circle\":\n self.draw_circle_template()\n\n #show mouse state\n self.show_mousestate()", "def make_ship(self):\n self.is_ship = True", "def draw_current_row(self):\n if self.row <= 9:\n y = Y_POS[self.row]\n pointlist = [(5, y-5), (10, y), (5, y+5)]\n pygame.draw.polygon(self.screen, BLACK, pointlist)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the file name of the neural network attached to this instance.
def get_filename(self):
    return self.net.filename
[ "def file_name(self):\n ret = self._get_attr(\"fileName\")\n return ret", "def network_name(self):\n ret = self._get_attr(\"networkName\")\n return ret", "def name(self):\n self.filename = self.model.name+\"_\"\n for k,p in self.params.items():\n self.filename += k+\"_\"+str(p).replace(\".\", \",\")+\"_\"\n self.filename += str(self.nb_dataset)", "def file_name(self) -> str:\n return pulumi.get(self, \"file_name\")", "def get_filename(self) -> str:\n return self._filename", "def get_network_name(self):\n start = self.network.find(\"network\")\n end = self.network.find(\"}\\n\", start)\n # Creating a network attribute\n network_attribute = (\n Suppress(\"network\") + Word(pp.unicode.alphanums + \"_\" + \"-\") + \"{\"\n )\n network_name = network_attribute.searchString(self.network[start:end])[0][0]\n\n return network_name", "def name(self):\n if not self._name:\n self._name = self._layer.GetName()\n return self._name", "def file_name(self) -> str:\n return self._occurrence_data.get('fileName') # type: ignore", "def current_filename(self):\n return \"%s_%s_%s.png\" % (LABELS[self.metadata['creating_entity']],\n SECTORS[self.metadata['sector']],\n CHANNELS[self.metadata['channel']])", "def get_filename(self):\n return self.source.get_filename()", "def filename(self) -> str:\n return os.path.splitext(\n os.path.basename(\n unquote(\n urlparse(\n self.original_url\n ).path\n )\n )\n )[0] + \".png\"", "def name(self):\n assert self.file_name_prefix, 'The self.file_name_prefix variable must be set in your inheriting class'\n return '%s-%s.xml' % (self.file_name_prefix[0], datetime.today().strftime('%Y%m%d-%H%M%S-%f'))", "def name(self):\n return self._output.name", "def get_classifier_filename(self):\n\n # Classifier filename is parametrized by important experiment parameters.\n return 'classifier_{}_lr{}_rr{}_m{}_c{}_a{}.pkl'.format(self.classifier_params['input_feature'],\n self.classifier_params['learning_rate'],\n self.classifier_params['random_restarts'],\n self.classifier_params['num_measurements'],\n self.classifier_params['counter'],\n self.classifier_params['a_index'])", "def filename(self):\n if self.type == 'literal':\n return self._message.filename\n return ''", "def get_download_file_name(self):\n # Use 'unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.get_url_name()\n else:\n term = 'unknown'\n\n return 'syllabus-{course}-{term}-{instructors}{ext}'.format(\n course=self.course_instance.course.get_url_name(),\n term=term,\n instructors='_'.join([i.last_name for i in self.instructors]),\n ext=self.file_ext)", "def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)", "def DwfFileName(self) -> str:", "def filename(self):\n return f\"{self.sha}{self.extension}\"", "def _get_model_filename(self) -> str:\n model_filename = f'{self.model_dir}/{self.description}.{self._get_model_file_extension()}'\n return model_filename" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The user can input a list of images if they would like to create static masks, as well as optional values for static_sig and inputDict. The configObj .cfg file will set the defaults, which are then overridden by the user's options.
def createMask(input=None, static_sig=4.0, group=None, editpars=False, configObj=None, **inputDict):
    if input is not None:
        inputDict["static_sig"] = static_sig
        inputDict["group"] = group
        inputDict["updatewcs"] = False
        inputDict["input"] = input
    else:
        print("Please supply an input image\n", file=sys.stderr)
        raise ValueError

    # this accounts for a user-called init where config is not defined yet
    configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict, loadOnly=(not editpars))
    if configObj is None:
        return

    if not editpars:
        run(configObj)
[ "def process_model_config(model_cfg: mmengine.Config,\n imgs: Union[Sequence[str], Sequence[np.ndarray]],\n input_shape: Optional[Sequence[int]] = None):\n if isinstance(imgs[0], np.ndarray):\n # set loading pipeline type\n model_cfg.test_pipeline[0].type = 'LoadImageFromNDArray'\n test_pipeline = model_cfg._cfg_dict.test_pipeline\n for i, transform in enumerate(test_pipeline):\n if transform.type == 'PackTextRecogInputs':\n test_pipeline[i].meta_keys = tuple(\n j for j in test_pipeline[i].meta_keys if j != 'instances')\n\n # for static exporting\n if input_shape is not None and transform.type == 'RescaleToHeight':\n resize = {\n 'height': input_shape[1],\n 'min_width': input_shape[0],\n 'max_width': input_shape[0]\n }\n test_pipeline[i].update(resize)\n\n test_pipeline = [\n transform for transform in test_pipeline\n if transform.type != 'LoadOCRAnnotations'\n ]\n\n model_cfg.test_pipeline = test_pipeline\n return model_cfg", "def image_processing_parameters(recipe_config):\n params = {}\n params[Constants.FUNCTIONS_DEF] = recipe_config.get(Constants.FUNCTIONS_DEF, None)\n params[Constants.PIPELINE_DEF] = recipe_config.get(Constants.PIPELINE_DEF, None)\n return params", "def prepare_model_cli(\n img_input: str or Path,\n output_path: str or Path,\n masks: str or Path,\n file_types: str,\n) -> [list, list, Path]:\n from tiatoolbox.utils.misc import grab_files_from_dir, string_to_tuple\n\n img_input = no_input_message(input_file=img_input)\n output_path = Path(output_path)\n file_types = string_to_tuple(in_str=file_types)\n\n if output_path.exists():\n msg = \"Path already exists.\"\n raise FileExistsError(msg)\n\n if not Path.exists(img_input):\n raise FileNotFoundError\n\n files_all = [\n img_input,\n ]\n\n masks_all = None\n\n if masks is not None:\n masks = Path(masks)\n if masks.is_file():\n masks_all = [masks]\n if masks.is_dir():\n masks_all = grab_files_from_dir(\n input_path=masks,\n file_types=(\"*.jpg\", \"*.png\"),\n )\n\n if Path.is_dir(img_input):\n files_all = grab_files_from_dir(input_path=img_input, file_types=file_types)\n\n return [files_all, masks_all, output_path]", "def _addAdditionalOptics(self,optsys, oversample=2):\n\n #optsys.addImage(name='null for debugging NIRcam _addCoron') # for debugging\n\n if self.image_mask == 'MASK210R':\n optsys.addImage(function='BandLimitedCoron', kind='nircamcircular', sigma=5.253 , name=self.image_mask)\n trySAM = True\n SAM_box_size = 5.0\n elif self.image_mask == 'MASK335R':\n optsys.addImage(function='BandLimitedCoron', kind='nircamcircular', sigma=3.2927866 , name=self.image_mask)\n trySAM = True\n SAM_box_size = 5.0\n elif self.image_mask == 'MASK430R':\n optsys.addImage(function='BandLimitedCoron', kind='nircamcircular', sigma=2.588496*0.99993495 , name=self.image_mask)\n trySAM = True\n SAM_box_size = 5.0\n elif self.image_mask == 'MASKSWB':\n optsys.addImage(function='BandLimitedCoron', kind='nircamwedge', wavelength=2.1e-6, name=self.image_mask)\n trySAM = False #True FIXME\n SAM_box_size = [5,20]\n elif self.image_mask == 'MASKLWB':\n optsys.addImage(function='BandLimitedCoron', kind='nircamwedge', wavelength=4.6e-6, name=self.image_mask)\n trySAM = False #True FIXME\n SAM_box_size = [5,20]\n else:\n # no occulter selected but coronagraphic mode anyway.\n trySAM = False\n SAM_box_size = 1.0 # irrelevant but variable still needs to be set.\n \n # add pupil plane mask\n if ('pupil_shift_x' in self.options.keys() and self.options['pupil_shift_x'] != 0) or \\\n ('pupil_shift_y' in self.options.keys() and 
self.options['pupil_shift_y'] != 0):\n shift = (self.options['pupil_shift_x'], self.options['pupil_shift_y'])\n else: shift = None\n\n\n #optsys.addPupil( name='null for debugging NIRcam _addCoron') # debugging\n if self.pupil_mask == 'CIRCLYOT':\n optsys.addPupil(transmission=self._datapath+\"/optics/NIRCam_Lyot_Somb.fits\", name=self.pupil_mask, shift=shift)\n elif self.pupil_mask == 'WEDGELYOT':\n optsys.addPupil(transmission=self._datapath+\"/optics/NIRCam_Lyot_Sinc.fits\", name=self.pupil_mask, shift=shift)\n elif self.pupil_mask == 'WEAK LENS +4':\n optsys.addPupil(poppy.ThinLens(name='Weak Lens +4', nwaves=4, reference_wavelength=2e-6))\n elif self.pupil_mask == 'WEAK LENS +8':\n optsys.addPupil(poppy.ThinLens(name='Weak Lens +8', nwaves=8, reference_wavelength=2e-6))\n elif self.pupil_mask == 'WEAK LENS -8':\n optsys.addPupil(poppy.ThinLens(name='Weak Lens -8', nwaves=-8, reference_wavelength=2e-6))\n elif self.pupil_mask == 'WEAK LENS +12 (=4+8)':\n stack = poppy.CompoundAnalyticOptic(name='Weak Lens Stack +12', opticslist=[\n poppy.ThinLens(name='Weak Lens +4', nwaves=4, reference_wavelength=2e-6),\n poppy.ThinLens(name='Weak Lens +8', nwaves=8, reference_wavelength=2e-6)])\n optsys.addPupil(stack)\n elif self.pupil_mask == 'WEAK LENS -4 (=4+8)':\n stack = poppy.CompoundAnalyticOptic(name='Weak Lens Stack -4', opticslist=[\n poppy.ThinLens(name='Weak Lens +4', nwaves=4, reference_wavelength=2e-6),\n poppy.ThinLens(name='Weak Lens -8', nwaves=-8, reference_wavelength=2e-6)])\n optsys.addPupil(stack)\n\n\n elif (self.pupil_mask is None and self.image_mask is not None):\n optsys.addPupil(name='No Lyot Mask Selected!')\n\n return (optsys, trySAM, SAM_box_size)", "def define_preprocess_input(args):\n MODELS = {\n \"vgg16\": vgg16.VGG16,\n \"vgg19\": vgg19.VGG19,\n \"inception\": inception_v3.InceptionV3,\n \"xception\": xception.Xception,\n \"resnet50\": resnet50.ResNet50\n }\n\n # when use customized structure\n # if not args.pretrain:\n # def preprocess_input(x):\n # img = imagenet_utils.preprocess_input(image.img_to_array(x)) # scale pixels between -1 and 1, sample-wise: x /= 127.5, x -= 1\n # return image.array_to_img(img)\n if not args.pretrain:\n # when args.channels = 3\n if args.channels == 3:\n def preprocess_input(x):\n img = imagenet_utils.preprocess_input(image.img_to_array(x)) #scale pixels between -1 and 1, sample-wise: x /= 127.5, x -= 1\n return image.array_to_img(img)\n # when channels = 1\n elif args.channels == 1:\n def preprocess_input(x):\n img = image.img_to_array(x)\n # resize\n img = cv2.resize(img, (args.img_size, args.img_size), interpolation = cv2.INTER_CUBIC)\n img = image.img_to_array(img) # img_to_array able to make ndarray [28,28] -> [28,28,1]\n # normalization\n img /= 225.0\n img = image.array_to_img(img) #input img rank have to be 3\n return img\n\n elif args.model_name in ('vgg16', 'vgg19', 'resnet50'):\n def preprocess_input(x):\n img = imagenet_utils.preprocess_input(image.img_to_array(x))\n return image.array_to_img(img)\n\n elif args.model_name in (\"inception\", \"xception\"):\n def preprocess_input(x):\n img = inception_v3.preprocess_input(image.img_to_array(x))\n return image.array_to_img(img)\n\n elif args.pretrain and args.model_name not in MODELS:\n print('input pretrain model preprocessing has not been pre-defined yet')\n raise AttributeError\n\n return preprocess_input", "def __init__ ( self ) :\n\n self.m_key_in = self.configStr ('key_in', 'image')\n self.m_ofname = self.configStr ('ofname', 'img-cspad')\n self.m_print_bits = 
self.configInt ('print_bits', 1)\n\n if self.m_print_bits & 1 : self.print_input_pars()", "def _vmware_static_ip_config(self, args: parser_extensions.Namespace):\n if 'static_ip_config_from_file' in args.GetSpecifiedArgsDict():\n return self._vmware_static_ip_config_from_file(args)\n\n if 'static_ip_config_ip_blocks' in args.GetSpecifiedArgsDict():\n return self._vmware_static_ip_config_ip_blocks(args)\n\n return None", "def defaultParams(self):\n self.blurs = [[-1, self.fileRes], [-1, self.fileRes],[-1, self.fileRes]] \n self.gradient = [[False,True], [False,True], [False,True]]\n self.similarityMetric = [[\"CC\", \"CC\"],[\"CC\", \"CC\"],[\"CC\", \"CC\"]]\n self.weight = [[1,1],[1,1],[1,1]]\n self.radiusHisto = [[3,3],[3,3],[3,3]]\n self.transformationModel = [\"SyN[0.1]\", \"SyN[0.1]\", \"SyN[0.1]\"]\n self.regularization = [\"Gauss[2,1]\", \"Gauss[2,1]\", \"Gauss[2,1]\"]\n self.iterations = [\"100x100x100x0\", \"100x100x100x20\", \"100x100x100x100\"]\n self.useMask = [False, True, True]\n self.memoryRequired = [0.177, 1.385e-7, 2.1e-7]", "def _addAdditionalOptics(self,optsys, oversample=2):\n trySAM = False # semi-analytic method never applicable here. \n SAM_box_size = None\n if self.image_mask == 'S200A1' or self.image_mask == 'S200A2' or self.image_mask == 'S200B1':\n # three identical slits, 0.2 x 3.2 arcsec in length\n optsys.addImage(optic=poppy.IdealRectangularFieldStop(width=0.2, height=3.2, name= self.image_mask + \" slit\"))\n elif self.image_mask == 'S400A1':\n # one slit, 0.4 x 3.65 arcsec in height\n optsys.addImage(optic=poppy.IdealRectangularFieldStop(width=0.4, height=3.65, name= self.image_mask + \" slit\"))\n elif self.image_mask == 'S1600A1':\n # square aperture for exoplanet spectroscopy\n optsys.addImage(optic=poppy.IdealRectangularFieldStop(width=1.6, height=1.6, name= self.image_mask + \" square aperture\"))\n elif self.image_mask == 'MSA all open':\n # all MSA shutters open \n optsys.addImage(optic=NIRSpec_MSA_open_grid(name= self.image_mask))\n elif self.image_mask == 'Single MSA open shutter':\n # one MSA open shutter aperture \n optsys.addImage(optic=poppy.IdealRectangularFieldStop(width=0.2, height=0.45, name= self.image_mask))\n elif self.image_mask == 'Three adjacent MSA open shutters':\n optsys.addImage(optic=NIRSpec_three_MSA_shutters(name=self.image_mask))\n\n \n\n\n if ((self.pupil_mask is not None) and ('grating' in self.pupil_mask.lower())):\n # NIRSpec pupil stop at the grating appears to be a rectangle.\n # see notes and ray trace from Erin Elliot in the webbpsf-data/NIRSpec/sources directory\n optsys.addPupil(optic=poppy.RectangleAperture(height=8.41, width=7.91, name='Pupil stop at grating wheel'))\n\n #if (self.pupil_mask is None and self.image_mask is not None):\n # if we don't have a specific pupil stop, just assume for now we're\n # stopped down to a JWST like geometry\n # FIXME this is not really right - should be updated for the NIRSpec grating wheels\n #optsys.addPupil(optic=optsys[0], name='No Pupil stop provided')\n #optsys.addPupil(optic=poppy.SquareAperture(size=3.5, name='Pupil stop at grating wheel'))\n\n\n\n return (optsys, trySAM, SAM_box_size)", "def from_config(cls, config: Dict[str, Any]) -> \"ImgPilToPatchesAndImage\":\n return cls(**config)", "def ProcessInput(config, logger=None, file_scope_only=False, safe_only=False):\n if 'input' in config:\n logger = LoggerWrapper(logger)\n file_num = config.get('file_num',0)\n logger.debug('file %d: Start ProcessInput',file_num)\n\n # We'll iterate through this list of keys a few times\n 
all_keys = [str(k) for k in config['input'].keys() if k in valid_input_types]\n\n # The input items can be rather large. Especially RealGalaxyCatalog. So it is\n # unwieldy to copy them in the config file for each process. Instead we use proxy\n # objects which are implemented using multiprocessing.BaseManager. See\n #\n # http://docs.python.org/2/library/multiprocessing.html\n #\n # The input manager keeps track of all the real objects for us. We use it to put\n # a proxy object in the config dict, which is copyable to other processes.\n # The input manager itself should not be copied, so the function CopyConfig makes\n # sure to only keep that in the original config dict, not the one that gets passed\n # to other processed.\n # The proxy objects are able to call public functions in the real object via\n # multiprocessing communication channels. (A Pipe, I believe.) The BaseManager\n # base class handles all the details. We just need to register each class we need\n # with a name (called tag below) and then construct it by calling that tag function.\n\n # We don't need the manager stuff if we (a) are already in a multiprocessing Process, or\n # (b) we are only loading for file scope, or (c) both config.image.nproc and\n # config.output.nproc == 1.\n use_manager = (\n 'current_nproc' not in config and\n not file_scope_only and\n ( ('image' in config and 'nproc' in config['image'] and\n ParseValue(config['image'], 'nproc', config, int)[0] != 1) or\n ('output' in config and 'nproc' in config['output'] and\n ParseValue(config['output'], 'nproc', config, int)[0] != 1) ) )\n\n if use_manager and '_input_manager' not in config:\n class InputManager(SafeManager): pass\n\n # Register each input field with the InputManager class\n for key in all_keys:\n fields = config['input'][key]\n nfields = len(fields) if isinstance(fields, list) else 1\n for num in range(nfields):\n tag = key + str(num)\n init_func = valid_input_types[key].init_func\n proxy = InputProxy(init_func)\n InputManager.register(tag, init_func, proxy)\n # Start up the input_manager\n config['_input_manager'] = InputManager()\n config['_input_manager'].start()\n\n # Read all input fields provided and create the corresponding object\n # with the parameters given in the config file.\n for key in all_keys:\n loader = valid_input_types[key]\n\n # Skip this key if not relevant for file_scope_only run.\n if file_scope_only and not loader.file_scope: continue\n\n logger.debug('file %d: Process input key %s',file_num,key)\n fields = config['input'][key]\n nfields = len(fields) if isinstance(fields, list) else 1\n\n for num in range(nfields):\n input_obj = LoadInputObj(config, key, num, safe_only, logger)\n\n # Check that there are no other attributes specified.\n valid_keys = valid_input_types.keys()\n CheckAllParams(config['input'], ignore=valid_keys)", "def shared_client_hints(self, **options):\n tag = CloudinaryImage(self.full_public_id).image(**options)\n six.assertRegex(self, tag, '<img.*>', \"should not use data-src or set responsive class\")\n self.assertIsNone(re.match('<.* class.*>', tag), \"should not use data-src or set responsive class\")\n self.assertIsNone(re.match('\\bdata-src\\b', tag), \"should not use data-src or set responsive class\")\n expected_re = 'src=[\"\\']{url}/c_scale,dpr_auto,w_auto/{id}[\"\\']'.format(**self.common_format)\n six.assertRegex(self, tag, expected_re, \"should not use data-src or set responsive class\")\n cloudinary.config(responsive=True)\n tag = 
CloudinaryImage(self.full_public_id).image(**options)\n six.assertRegex(self, tag, '<img.*>')\n self.assertIsNone(re.match('<.* class.*>', tag), \"should override responsive\")\n self.assertIsNone(re.match('\\bdata-src\\b', tag), \"should override responsive\")\n\n six.assertRegex(self, tag, expected_re, \"should override responsive\")", "def __init__ ( self,\n sources = None,\n threshold = None,\n image_rotations = None,\n image_shifts = None,\n image_scales = None,\n image_nicknames = None,\n image_manipulations = None, \n output_file = None,\n n_hdf5 = None ,\n plot_every_n = None,\n accumulate_n = None,\n fignum = \"1\" ):\n\n opt = PyanaOptions() # convert option string to appropriate type\n self.plot_every_n = opt.getOptInteger(plot_every_n)\n self.mpl_num = opt.getOptInteger(fignum)\n\n self.sources = opt.getOptStrings(sources)\n nsources = len(self.sources)\n print \"pyana_image, %d sources: \" % nsources\n for sources in self.sources :\n print \" \", sources\n\n self.image_nicknames = []\n if image_nicknames is None:\n for i in range (0, len(self.sources) ):\n self.image_nicknames.append( \"Im%d\"%(i+1) )\n else :\n self.image_nicknames = image_nicknames.split(\" \")\n\n \n self.image_rotations = None\n if image_rotations is not None:\n if image_rotations == \"\" or image_rotations == \"None\" :\n self.image_rotations = None\n else : \n self.image_rotations = {}\n list_of_rotations = image_rotations.split(\" \")\n if len(list_of_rotations) != nsources: print \"Plz provide rotation angles for *all* images!\"\n i = 0\n for source in self.sources :\n self.image_rotations[source] = float( list_of_rotations[i] )\n i+=1\n \n \n self.image_shifts = None\n if image_shifts is not None:\n if image_shifts == \"\" or image_shifts == \"None\" :\n self.image_shifts = None\n else :\n self.image_shifts = {}\n list_of_shifts = image_shifts.split(\" \") \n if len(list_of_shifts) != nsources: print \"Plz provide shift amount for *all* images!\"\n i = 0\n for source in self.sources :\n shift = list_of_shifts[i].lstrip(\"(\").rstrip(\")\").split(\",\")\n self.image_shifts[source] = (int(shift[0]), int(shift[1]))\n i+=1\n\n self.image_scales = None\n if image_scales is not None:\n if image_scales == \"\" or image_scales == \"None\" :\n self.image_scales = None\n else :\n self.image_scales = {}\n list_of_scales = image_scales.split(\" \") \n if len(list_of_scales) != nsources: print \"Plz provide scale factors for *all* images!\"\n i = 0\n for sources in self.image_adresses :\n self.image_scales[source] = float( list_of_scales[i] )\n i+=1\n\n self.image_manipulations = None\n if image_manipulations is not None:\n if image_manipulations == \"\" or image_manipulations == \"None\" :\n self.image_manipulations = None\n else : \n self.image_manipulations = image_manipulations\n\n\n self.output_file = output_file\n if output_file == \"\" or output_file == \"None\" :\n self.output_file = None\n print \"Using output_file: \", self.output_file\n\n threshold_string = opt.getOptStrings(threshold)\n # format: 'value (xlow:xhigh,ylow:yhigh)', only value is required\n \n self.threshold = None\n if len(threshold_string)>0:\n self.threshold = Threshold()\n self.threshold.value = opt.getOptFloat(threshold_string[0])\n print \"Using threshold value \", self.threshold.value\n if len(threshold_string)>1:\n self.threshold.area = np.array([0.,0.,0.,0.])\n \n intervals = threshold_string[1].strip('()').split(',')\n xrange = intervals[0].split(\":\")\n yrange = intervals[1].split(\":\")\n self.threshold.area[0] = 
float(xrange[0])\n self.threshold.area[1] = float(xrange[1])\n self.threshold.area[2] = float(yrange[0])\n self.threshold.area[3] = float(yrange[1])\n \n print \"Using threshold area \", self.threshold.area\n \n\n self.n_hdf5 = None\n if n_hdf5 is not None :\n if n_hdf5 == \"\" or n_hdf5 == \"None\" :\n self.n_hdf5 = None\n else :\n self.n_hdf5 = int(n_hdf5)\n\n # to keep track\n self.n_shots = None\n\n # averages\n self.sum_good_images = {}\n self.sum_dark_images = {}\n self.n_good = {}\n self.n_dark = {}\n for addr in self.sources :\n self.sum_good_images[addr] = None\n self.sum_dark_images[addr] = None\n self.n_good[addr] = 0\n self.n_dark[addr] = 0\n\n # output file\n self.hdf5file = None\n if self.output_file is not None :\n if \".hdf5\" in self.output_file and self.n_hdf5 is None:\n print \"opening %s for writing\" % self.output_file\n self.hdf5file = h5py.File(self.output_file, 'w')\n\n self.plotter = Plotter() \n self.plotter.settings(7,7) # set default frame size", "def input_plot_settings():\n inputpath = \".\"\n scalingfactor = 0\n changerange = False\n axisrange = []\n\n if input(\"Do you want to change the default settings? [y/n]\\n\").lower() == \"y\":\n inputpath = input(\"Please enter directory of the input .dat files:\\n\")\n inputpath = inputpath.replace(\"//\", \"/\")\n scalingfactor = float(input(\"Enter scaling factor for eigenstates (0 = autoscale):\\n\"))\n if input(\"Do you want to change the axis range? [y/n]\\n\").lower() == \"y\":\n changerange = True\n axisrange = input(\"Enter xmin, xmax, ymin, ymax (seperated by \\\",\\\"):\\n\").split(\",\")\n axisrange = list(map(float, axisrange))\n\n return (inputpath, scalingfactor, changerange, axisrange)", "def set_defaults_and_validate_options():\n global options\n\n # Initialize globals\n init_globals()\n\n # Required parameters\n if \"options\" not in globals():\n abort(\"Options dictionary was not defined!\")\n\n # Check which mode we're using\n if options.get(\"mode\", \"\") == \"personality\":\n required_parameters = set()\n else:\n required_parameters = set([\"target_system_image\"])\n\n # USB doesn't require remote credentials\n if os.environ.get(\"POAP_PHASE\", None) != \"USB\":\n # Remote download needs more parameters\n required_parameters.add(\"username\")\n required_parameters.add(\"password\")\n required_parameters.add(\"hostname\")\n\n # If we are missing any required parameters\n missing_parameters = required_parameters.difference(list(options.keys()))\n if len(missing_parameters) != 0:\n poap_log(\"Required parameters are missing:\")\n abort(\"Missing %s\" % \", \".join(missing_parameters))\n\n # Set the POAP mode\n set_default(\"mode\", \"serial_number\")\n\n # Required space to copy config kickstart and system image in KB\n set_default(\"required_space\", 100000)\n # Transfer protocol (http, ftp, tftp, scp, etc.)\n set_default(\"transfer_protocol\", \"scp\")\n # Directory where the config resides\n set_default(\"config_path\", \"/var/lib/tftpboot/\")\n # Target image and its path (single image is default)\n set_default(\"target_system_image\", \"\")\n set_default(\"target_image_path\", \"/var/lib/tftpboot/\")\n set_default(\"target_kickstart_image\", \"\")\n # Destination image and its path\n set_default(\"destination_path\", \"/bootflash/\")\n set_default(\"destination_system_image\", options[\"target_system_image\"])\n set_default(\"destination_kickstart_image\", options[\"target_kickstart_image\"])\n set_default(\"destination_midway_system_image\", \"midway_system.bin\")\n 
set_default(\"skip_multi_level\", False)\n set_default(\"destination_midway_kickstart_image\", \"midway_kickstart.bin\")\n set_default(\"serial_number\",\"\");\n set_default(\"install_path\", \"\")\n set_default(\"use_nxos_boot\", False)\n set_default(\"https_ignore_certificate\", False)\n \n # User app path\n set_default(\"user_app_path\", \"/var/lib/tftpboot/\")\n\n # MD5 Verification\n set_default(\"disable_md5\", False)\n\n # Midway system and kickstart source file name.\n # This should be a 6.x U6 or greater dual image.\n # Required only if moving from pre 6.x U6 image to 7.x/higher image.\n set_default(\"midway_system_image\", \"\")\n set_default(\"midway_kickstart_image\", \"\")\n set_default(\"skip_multi_level\", False)\n # --- USB related settings ---\n # USB slot info. By default its USB slot 1, if not specified specifically.\n # collina2 has 2 usb ports. To enable poap in usb slot 2 user has to set the\n # value of usbslot to 2.\n set_default(\"usb_slot\", 1)\n # Source file name of Config file\n set_default(\"source_config_file\", \"poap.cfg\")\n\n set_default(\"vrf\", os.environ['POAP_VRF'])\n set_default(\"destination_config\", \"poap_conf.cfg\")\n set_default(\"split_config_first\", \"poap_1.cfg\")\n set_default(\"split_config_second\", \"poap_2.cfg\")\n\n # Timeout info (in seconds)\n # Not applicable for TFTP protocol. POAP script timeout can help\n # in the case of TFTP.\n set_default(\"timeout_config\", 120) # 2 minutes\n set_default(\"timeout_copy_system\", 2100) # 35 minutes\n set_default(\"timeout_copy_kickstart\", 900) # 15 minutes\n set_default(\"timeout_copy_personality\", 900) # 15 minutes\n set_default(\"timeout_copy_user\", 900) # 15 minutes\n\n # Personality\n set_default(\"personality_path\", \"/var/lib/tftpboot\")\n set_default(\"source_tarball\", \"personality.tar\")\n set_default(\"destination_tarball\", options[\"source_tarball\"])\n set_default(\"compact_image\", False)\n\n # Check that options are valid\n validate_options()", "def _set_kwargs_defaults(**kwargs):\n\t\tkwargs.setdefault('fig_title', 'Figure') # Figure title.\n\t\tkwargs.setdefault('write_title', False) # Decides whether the title should be written within the plot or not.\n\t\tkwargs.setdefault('legend_title', '') # Write a legend title.\n\t\tkwargs.setdefault('xscale', 'linear') # Set a scale for the x axis.\n\t\tkwargs.setdefault('yscale', 'linear') # Set a scale for the y axis.\n\t\tkwargs.setdefault('xlabel', None) # Set a default label for the x axis.\n\t\tkwargs.setdefault('ylabel', 'y') # Set a default label for the y axis.\n\t\tkwargs.setdefault('save_fig', False) # Decides if the figure will be saved or not.\n\t\tkwargs.setdefault('save_mat', False) # Decides if the data is exported to a .mat file or not.\n\t\tkwargs.setdefault('image_format', '.png') # Set an extension for the image. 
This will be used if saving image.\n\t\tkwargs.setdefault('open_folders',\n\t\t False) # Indicate if the program should open the folder where files are stored.\n\t\treturn kwargs", "def _input_def(filenames, labels):\n\n assert len(filenames) == len(labels), \"Filenames and labels should have same length\"\n images = []\n for i in range(len(filenames)):\n im_i = _parse_function(filenames[i])\n images.append(im_i)\n\n return images", "def parse_network_from_config(args, input_shape):\n\n # parse standard cases\n if isinstance(args, dict):\n if args['net'] in ['resnet18', 'resnet34', 'resnet50']:\n from torchvision.models import resnet18, resnet34, resnet50\n\n resnet_fn = None\n if args['net'] == 'resnet18':\n resnet_fn = resnet18\n if args['net'] == 'resnet34':\n resnet_fn = resnet34\n if args['net'] == 'resnet50':\n resnet_fn = resnet50\n\n norm_layer = torch.nn.BatchNorm2d\n if args.get('norm_layer', '') == 'GroupNorm':\n norm_layer = group_norm_partial_apply_fn(num_groups=32)\n if args.get('norm_layer', '') == 'none':\n norm_layer = (lambda num_channels: Identity())\n\n num_classes = args.get('num_classes', 1000)\n pretrained = args.get('pretrained', False)\n\n # if pretraining is enabled but number of classes is not 1000 replace the last layer\n if pretrained and num_classes != 1000:\n net = resnet_fn(norm_layer=norm_layer, num_classes=1000, pretrained=pretrained)\n net.fc = nn.Linear(net.fc.in_features, num_classes)\n else:\n net = resnet_fn(norm_layer=norm_layer, num_classes=num_classes, pretrained=pretrained)\n output_shape = infer_shape([net], input_shape)\n print(\"output.shape:\", output_shape)\n return net, output_shape\n\n if args['net'] in ['resnet18-cifar', 'resnet34-cifar']:\n from .networks.resnet_cifar import resnet18, resnet34\n\n resnet_fn = None\n if args['net'] == 'resnet18-cifar':\n resnet_fn = resnet18\n if args['net'] == 'resnet34-cifar':\n resnet_fn = resnet34\n\n norm_layer = torch.nn.BatchNorm2d\n if args.get('norm_layer', '') == 'GroupNorm':\n norm_layer = group_norm_partial_apply_fn(num_groups=32)\n if args.get('norm_layer', '') == 'none':\n norm_layer = (lambda num_channels: Identity())\n net = resnet_fn(num_classes=args['num_classes'], norm_layer=norm_layer)\n output_shape = infer_shape([net], input_shape)\n print(\"output.shape:\", output_shape)\n return net, output_shape\n\n # parse feed forward\n return parse_feed_forward(args, input_shape)", "def getInput3Sersic(config, readinFile='cutout_3ser.in', constr=False, skyGrad=True):\n\n f = open(readinFile, 'w')\n\n f.write('\\n')\n f.write('===============================================================================\\n')\n f.write('# IMAGE and GALFIT CONTROL PARAMETERS\\n')\n f.write('A) %s # Input data image (FITS file)\\n' % config['image'][0])\n f.write('B) %s # Output data image block\\n' % config['output'][0])\n f.write('C) %s # Sigma image name\\n' % config['sig'][0])\n f.write('D) %s # Input PSF image \\n' % config['psf'][0])\n f.write('E) 1 # PSF fine sampling factor relative to data \\n')\n f.write('F) %s # Bad pixel mask\\n' % config['mask'][0])\n f.write('G) %s # File with parameter constraints \\n' % config['constr'][0])\n f.write('H) 1 %5d 1 %5d # Image region to fit\\n' % (config['dimx'],\n config['dimy']))\n f.write('I) %5d %5d # Size of the convolution box\\n' % (config['convbox'],\n config['convbox']))\n f.write('J) %6.2f # Magnitude photometric zeropoint \\n' % config['zp'])\n f.write('K) %7.3f %7.3f # Plate scale (dx dy)\\n' % (config['pix'], config['pix']))\n f.write('O) regular # 
Display type (regular, curses, both)\\n')\n f.write('P) 0 # Choose: 0=optimize, 1=model, 2=imgblock, 3=subcomps\\n')\n f.write('\\n')\n f.write('# INITIAL FITTING PARAMETERS\\n')\n f.write('#\\n')\n f.write('# For object type, the allowed functions are: \\n')\n f.write('# nuker, sersic, expdisk, devauc, king, psf, gaussian, moffat, \\n')\n f.write('# ferrer, powsersic, sky, and isophote. \\n')\n f.write('# \\n')\n f.write('# Hidden parameters will only appear when they''re specified:\\n')\n f.write('# C0 (diskyness/boxyness), \\n')\n f.write('# Fn (n=integer, Azimuthal Fourier Modes),\\n')\n f.write('# R0-R10 (PA rotation, for creating spiral structures).\\n')\n f.write('# \\n')\n f.write('# -----------------------------------------------------------------------------\\n')\n f.write('# par) par value(s) fit toggle(s) # parameter description \\n')\n f.write('# -----------------------------------------------------------------------------\\n')\n\n f.write('\\n')\n f.write('# Object number: 1\\n')\n f.write(' 0) sersic \\n')\n f.write(' 1) %7.1f %7.1f 1 1 \\n' % (config['x'], config['y']))\n f.write(' 3) %7.3f 1 \\n' % (config['mag']+1.2))\n f.write(' 4) %7.3f 1 \\n' % (config['re']*0.25))\n f.write(' 5) %7.3f 1 \\n' % config['nser'])\n f.write(' 6) 0.0000 0 # ----- \\n')\n f.write(' 7) 0.0000 0 # ----- \\n')\n f.write(' 8) 0.0000 0 # ----- \\n')\n f.write(' 9) %7.3f 1 \\n' % config['ba'])\n f.write('10) %7.3f 1 \\n' % config['pa'])\n if useF1:\n f.write('F1) 0.01 10.00 1 1 ')\n if useF4:\n f.write('F4) 0.01 10.00 1 1 ')\n f.write(' Z) 0 # output option (0 = resid., 1 = Dont subtract) \\n')\n\n f.write('# Object number: 2\\n')\n f.write(' 0) sersic \\n')\n f.write(' 1) %7.1f %7.1f 1 1 \\n' % (config['x'], config['y']))\n f.write(' 3) %7.3f 1 \\n' % (config['mag']+0.9))\n f.write(' 4) %7.3f 1 \\n' % (config['re']*0.9))\n f.write(' 5) 0.9 1 \\n')\n f.write(' 6) 0.0000 0 # ----- \\n')\n f.write(' 7) 0.0000 0 # ----- \\n')\n f.write(' 8) 0.0000 0 # ----- \\n')\n f.write(' 9) %7.3f 1 \\n' % config['ba'])\n f.write('10) %7.3f 1 \\n' % config['pa'])\n if useF1:\n f.write('F1) 0.01 10.00 1 1 ')\n if useF4:\n f.write('F4) 0.01 10.00 1 1 ')\n f.write(' Z) 0 # output option (0 = resid., 1 = Dont subtract) \\n')\n f.write('\\n')\n\n f.write('# Object number: 3\\n')\n f.write(' 0) sersic \\n')\n f.write(' 1) %7.1f %7.1f 1 1 \\n' % (config['x'], config['y']))\n f.write(' 3) %7.3f 1 \\n' % (config['mag']+0.7))\n f.write(' 4) %7.3f 1 \\n' % (config['re']*1.3))\n f.write(' 5) 0.5 1 \\n')\n f.write(' 6) 0.0000 0 # ----- \\n')\n f.write(' 7) 0.0000 0 # ----- \\n')\n f.write(' 8) 0.0000 0 # ----- \\n')\n f.write(' 9) %7.3f 1 \\n' % config['ba'])\n f.write('10) %7.3f 1 \\n' % config['pa'])\n if useF1:\n f.write('F1) 0.01 10.00 1 1 ')\n if useF4:\n f.write('F4) 0.01 10.00 1 1 ')\n f.write(' Z) 0 # output option (0 = resid., 1 = Dont subtract) \\n')\n f.write('\\n')\n\n if config['usesky'] == 1:\n f.write('# Object number: 3\\n')\n f.write(' 0) sky # object type\\n')\n f.write(' 1) %8.3f 1 # sky background \\n' % config['bkg'])\n if skyGrad:\n f.write(' 2) 0.0000 1 # dsky/dx (sky gradient in x)\\n')\n f.write(' 3) 0.0000 1 # dsky/dy (sky gradient in y)\\n')\n else:\n f.write(' 2) 0.0000 0 # dsky/dx (sky gradient in x)\\n')\n f.write(' 3) 0.0000 0 # dsky/dy (sky gradient in y)\\n')\n f.write(' Z) 0 # output option (0 = resid., 1 = Dont subtract) \\n')\n f.write('\\n')\n f.write('================================================================================\\n')\n\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a fresh (all-ones) numpy array for the static mask array signature.
def _buildMaskArray(self, signature):
    return np.ones(signature[1], dtype=np.int16)
[ "def getmaskarray(self):\n return Array._from_apply(\"wf.maskedarray.getmaskarray\", self)", "def _create_mask(self, data):\n sequence_lengths = [len(sample) for sample in data]\n max_sequence_length = max(sequence_lengths)\n mask = np.zeros((max_sequence_length, len(data)), dtype=config.floatX)\n for i, sequence_length in enumerate(sequence_lengths):\n mask[:sequence_length, i] = 1\n return mask", "def _make_standard_atom_mask() -> np.ndarray:\n # +1 to account for unknown (all 0s).\n mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)\n for restype, restype_letter in enumerate(restypes):\n restype_name = restype_1to3[restype_letter]\n atom_names = residue_atoms[restype_name]\n for atom_name in atom_names:\n atom_type = atom_order[atom_name]\n mask[restype, atom_type] = 1\n return mask", "def test_masked_arr_returned():\n\n masked = make_apply_mask(im, im_mask, [0, 4])\n assert np.ma.is_masked(masked)", "def getMaskArray(self, signature):\n if signature in self.masklist:\n mask = self.masklist[signature]\n else:\n mask = None\n return mask", "def to_masked_array(self):\n return np.ma.array(self.data, mask=self.mask,\n fill_value=self.fill_value)", "def make_mask(size, idx_true=None):\r\n\r\n # TODO: make work for n dimensional? is this something the np.ma module could do better?\r\n\r\n if idx_true is None:\r\n idx_true = list(range(size))\r\n\r\n mask = []\r\n for i in range(size):\r\n if i in idx_true:\r\n mask += [True]\r\n else:\r\n mask += [False]\r\n return np.array(mask)", "def __generate_masked_image(self) -> numpy.ndarray:\n modified_image = self.__image.copy()\n\n for channel_index in range(modified_image.shape[2]):\n channel = modified_image[:, :, channel_index]\n channel[self.__mask == 255] = numpy.nan\n modified_image[:, :, channel_index] = channel\n\n return modified_image", "def np_empty(shape: tuple = None, dtype: np.dtype = None, order: str = 'C'):\n return np.empty(shape, dtype, order)", "def inplace_maskarray(array, mask):\n\n if len(mask) != len(array):\n raise ValueError('Lengths must match')\n elif array.ndim != 2:\n raise ValueError('Can only take a 2 dimensional-array.')\n\n # Cython doesn't support bool arrays, so this does a no-copy type casting.\n uints = _np.frombuffer(mask, dtype=_np.uint8)\n index = _overwrite_matrix(array, uints)\n array.resize((index, array.shape[1]), refcheck=False)\n return array", "def empty(shape, dtype=None, order=None):\n return ndarray(shape, dtype, order=order)", "def _get_new_empty_results_array(self):\n return np.empty((self.num_methods, 0), dtype=np.object)", "def test_non_masked():\n data = np.array(10)\n result = nan_mask(data)\n assert result is data", "def createMaskedArray(path):\r\n\r\n print \"[+] Creating masked array for: {0}\".format(path)\r\n dataset = gdal.Open(path)\r\n\r\n if dataset is None:\r\n raise Exception()\r\n\r\n # Get geotransform data { top-left point coordinates and cell size }\r\n geotransform = dataset.GetGeoTransform()\r\n\r\n # Working on the first band\r\n band = dataset.GetRasterBand(1)\r\n #Store nodata value, for masking\r\n nodata = band.GetNoDataValue()\r\n # Load as array\r\n raster = band.ReadAsArray(0, 0, band.XSize, band.YSize)\r\n # Closing database\r\n dataset = None\r\n masked_raster = ma.masked_values(raster, nodata, copy=False)\r\n masked_raster.fill_value = nodata\r\n print \"[+] Returning masked raster\"\r\n return masked_raster, geotransform", "def empty_image(request):\n channels = request.param\n data_shape = (4, 8, 12, channels)\n return 
np.zeros(data_shape).astype(ART_NUMPY_DTYPE)", "def zeros(self, *args, **kwargs):\n return self.nplike_lib.zeros(*args, **kwargs)", "def _get_mask(x: np.ndarray, **kwargs) -> np.ndarray:\n mask = kwargs.get(\"mask\")\n\n if mask is not None:\n if mask.ndim > x.ndim:\n raise ValueError(\"Mask shape must be broadcastable to input shape.\")\n\n if not (np.issubdtype(mask.dtype, np.floating) or mask.dtype == np.bool):\n raise ValueError(\n \"The `mask` has to be either of type np.float32, np.float64 or np.bool. The provided\"\n \"`mask` is of type {}.\".format(mask.dtype)\n )\n\n if np.issubdtype(mask.dtype, np.floating) and np.amin(mask) < 0.0:\n raise ValueError(\n \"The `mask` of type np.float32 or np.float64 requires all elements to be either zero\"\n \"or positive values.\"\n )\n\n return mask", "def calc_mask(self):\n if self.max_shape is None:\n raise NotImplementedError(\"Generic Xpad detector does not\"\n \" know the max size ...\")\n mask = numpy.zeros(self.max_shape, dtype=numpy.int8)\n # workinng in dim0 = Y\n for i in range(0, self.max_shape[0], self.MODULE_SIZE[0]):\n mask[i, :] = 1\n mask[i + self.MODULE_SIZE[0] - 1, :] = 1\n # workinng in dim1 = X\n for i in range(0, self.max_shape[1], self.MODULE_SIZE[1]):\n mask[:, i ] = 1\n mask[:, i + self.MODULE_SIZE[1] - 1] = 1\n return mask", "def get_np_filled(input_data):\n separated = []\n for line in input_data:\n separated.append(list(line))\n\n # Make the data into a uniform square with 0's as filler\n data = np.array([np.array(line) for line in separated])\n lens = np.array([len(i) for i in data])\n mask = np.arange(lens.max()) < lens[:, None]\n out = np.zeros(mask.shape, dtype=data.dtype)\n out[mask] = np.concatenate(data)\n return out", "def zeros(shape: ConvertibleToShape, dtype: Any = float,\n order: str = \"C\") -> Array:\n # https://github.com/python/mypy/issues/3186\n return full(shape, 0, dtype) # type: ignore" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the appropriate StaticMask array for the image.
def getMaskArray(self, signature):
    if signature in self.masklist:
        mask = self.masklist[signature]
    else:
        mask = None
    return mask
[ "def getmaskarray(self):\n return Array._from_apply(\"wf.maskedarray.getmaskarray\", self)", "def mrcnn_masks(self):\n masks = []\n klasses = []\n for b in self.buildings:\n if b.color() == 5:\n continue\n img = np.zeros(S.MASKSHAPE[:2], dtype=np.uint8)\n coords = b.coords()\n if len(coords) > 0:\n cv2.fillPoly(img, np.array([coords]), 1)\n masks.append(img)\n klasses.append(b.color())\n if len(masks) == 0:\n return np.array(masks, copy=False), np.ones([0], dtype=np.int32)\n #masks.append(np.zeros(MASKSHAPE[:2], dtype=np.uint8))\n return np.dstack(masks).astype(bool), np.array(klasses)#np.ones([len(masks)], dtype=np.int32)", "def get_masks(target):\n path = STYLE_MASK_PATH if target else CONTENT_MASK_PATH\n masks = [f for f in os.listdir(path) if is_jpg_mask(f)]\n return masks", "def GetMaskImage(self) -> \"itkImageSS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUS3ISS3_GetMaskImage(self)", "def GetMaskImage(self) -> \"itkImageSS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterISS3ISS3_GetMaskImage(self)", "def load_static_badpix():\n par = common.pc_params()\n\n fname = os.path.join(os.environ[par['meta_env_var']],\n par['static_mask_filename'])\n\n assert(os.path.exists(fname))\n\n mask = fits.getdata(fname)\n\n return mask", "def get_masks(self):\n x = self.getxrvar('mask')\n ds = xr.Dataset()\n\n for cv_set in ['train', 'valid', 'test', 'all']:\n indices = self.space_indices[cv_set]\n mask = np.zeros((len(x.lat), len(x.lon)), dtype=int)\n for (lat, lon) in indices:\n mask[lat, lon] = 1\n ds[cv_set] = xr.DataArray(mask, coords=[x.lat, x.lon])\n\n return ds", "def GetMaskImage(self) -> \"itkImageSS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUC3ISS3_GetMaskImage(self)", "def _load_mask(self, gt_data):\n img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]\n mask = Image.new('L', (img_coco['width'], img_coco['height']), 0)\n for seg in gt_data['segmentation']:\n ImageDraw.Draw(mask).polygon(seg, outline='white', fill='white')\n return numpy.asarray(mask)", "def getAsMaskImage(self):\n\t\tif not self.isROI():\n\t\t\treturn None\n\t\tinsideMap = {}\n\t\tinsideMap.update(self.getCoveredPoints())\n\t\tinsMap = {}\n\t\tfor x, y in insideMap.keys():\n\t\t\tinsMap[(x, y)] = 1\n\t\tparent = self.GetCanvas()\n\t\tmx, my, mz = parent.dataUnit.getDimensions()\n\t\treturn lib.ImageOperations.getMaskFromPoints(insMap, mx, my, mz)", "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUS3IUS3_GetMaskImage(self)", "def GetMaskImage(self) -> \"itkImageSS2 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUS2ISS2_GetMaskImage(self)", "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIF3IUS3_GetMaskImage(self)", "def GetMaskImage(self) -> \"itkImageSS2 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIF2ISS2_GetMaskImage(self)", "def GetMaskImage(self) -> \"itkImageSS2 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterISS2ISS2_GetMaskImage(self)", "def mask(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.mask\", self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n mask_index = p2e._base._util._convert_str_to_type(val, int)\r\n 
return p2e.model._masks[mask_index]", "def _make_standard_atom_mask() -> np.ndarray:\n # +1 to account for unknown (all 0s).\n mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)\n for restype, restype_letter in enumerate(restypes):\n restype_name = restype_1to3[restype_letter]\n atom_names = residue_atoms[restype_name]\n for atom_name in atom_names:\n atom_type = atom_order[atom_name]\n mask[restype, atom_type] = 1\n return mask", "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUC3IUS3_GetMaskImage(self)", "def test_masked_arr_returned():\n\n masked = make_apply_mask(im, im_mask, [0, 4])\n assert np.ma.is_masked(masked)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the name of the output mask file that should reside on disk for the given signature.
def getFilename(self,signature):
    filename=constructFilename(signature)
    if(fileutil.checkFileExists(filename)):
        return filename
    else:
        print("\nMask file for ", str(signature), " does not exist on disk", file=sys.stderr)
        return None
[ "def _get_output_file_name(self):\n datetime_suffix = datetime.now().strftime('%Y%m%d_%H%M%S')\n\n # Only select the non-empty strings from the file name parts\n output_file_name = '_'.join([a for a in\n [self.output_file_name_prefix, self.output_file_name,\n self.output_file_name_suffix, datetime_suffix] if a\n ])\n\n return f\"{output_file_name}{self._get_output_file_extension()}\"", "def mask_filepath(self, i: int) -> str:\n img_fn: str = self.image_meta(i)[COCODataset._KEY_FILE_NAME]\n return os.path.join(self.masks_root, img_fn)", "def name_file(self, output_filename):\n return self.output_path / output_filename", "def get_file_name(self, output_dir, model_name):\n file_name = \"%sGroup%s_Seg%s_%s.dat\" % (model_name, self.group, self.segment, self.data_name)\n return os.path.join(output_dir, file_name)", "def maskname(self) -> Optional[str]:\n try:\n maskid = self._data['maskname']\n if not maskid.endswith('.mat'):\n maskid = maskid + '.mat'\n return maskid\n except KeyError:\n return None", "def get_fname_mcm(self, mask1, mask2, jk_region=None):\n fname = self.get_outdir()+\"mcm_\"+mask1+\"_\"+mask2\n if jk_region is not None:\n fname += \"_jk%d\" % jk_region\n fname += \".mcm\"\n return fname", "def getFilenamePattern(self):\n\t\treturn self.format", "def archive_filename(self):\n return \"%s_%s_%s_%s.png\" % (LABELS[self.metadata['creating_entity']],\n SECTORS[self.metadata['sector']],\n CHANNELS[self.metadata['channel']],\n self.metadata['valid'].strftime(\"%Y%m%d%H%M\"))", "def define_output_name(fname):\n phdr = pyfits.getheader(fname,ext=0)\n if 'D001DATA' in phdr:\n outname = phdr['D001DATA']\n if outname.find('['):outname = outname.split('[')[0]\n else:\n frootname = fname.split('_')[0]\n outname = fileutil.buildRootname(frootname)\n if outname is None:\n # make one from the header keywords\n outname = phdr['rootname']\n del phdr\n\n return outname", "def get_rotated_out_filename(self):\n\n basename = self._output_file\n if self._has_rotated_stdout_err_files:\n basename += \".%03d\" % (self._job_output_counter)\n\n return basename", "def filename(self):\n return f\"{self.sha}{self.extension}\"", "def get_save_image_name(org_im, org_im_path, output_dir):\n # name prefix of orginal image\n org_im_name = os.path.split(org_im_path)[-1]\n im_prefix = os.path.splitext(org_im_name)[0]\n ext = '.png'\n # save image path\n save_im_path = os.path.join(output_dir, im_prefix + ext)\n if os.path.exists(save_im_path):\n save_im_path = os.path.join(output_dir, im_prefix + 'time={}'.format(int(time.time())) + ext)\n\n return save_im_path", "def GetFilePath(signature):\n base = 'chromium/../../'\n match = re.findall(re.escape(base) + '.*\\|', signature)\n return match[-1][len(base):-1]", "def magic(self):\n try:\n with magic.Magic() as m:\n return m.id_filename(self.path)\n except Exception:\n return ''", "def __generate_file_name(hackathon_name, file_type, file_name):\n if file_type == FILE_TYPE.HACK_IMAGE:\n suffix = file_name.split('.')[-1]\n hackathon_name = \"\" if hackathon_name is None else hackathon_name + \"/\"\n real_name = hackathon_name + str(uuid1())[0:9] + strftime(\"%Y%m%d%H%M%S\") + \".\" + suffix\n return real_name\n else:\n return file_name", "def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)", "def _get_plot_figure_name(self, output_folder=None,\n figure_name_appendix=\"\"):\n if isinstance(output_folder, types.NoneType):\n output_folder = os.path.join(self.output_folder_path, \"slices\")\n check_folder(output_folder, 
False, True)\n fname = \"post_analysis_%s_%s_tf%s%s.pdf\" % (\n self.observable_name_compact, self.analysis_data_type,\n str(self.interval_index).replace(\".\", \"_\"),\n figure_name_appendix)\n return os.path.join(output_folder, fname)", "def get_temp_filename():\r\n # Disable pesky warnings about os.tempnam()\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n\r\n # Save the bins image in a temporary file\r\n filename = os.tempnam(\"autobga\",\"tmpoutimg\") + \".png\"\r\n \r\n return filename", "def get_output_basename(self):\n cumf_base_name = self.options[\"full_task_name\"]\n cumf_base_name = re.sub(r\"[() ]\", r\"_\", cumf_base_name)\n if cumf_base_name.endswith(\"_\"):\n cumf_base_name = cumf_base_name[:-1]\n return \"ana.\" + cumf_base_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete just the mask that matches the signature given.
def deleteMask(self,signature):
    if signature in self.masklist:
        self.masklist[signature] = None
    else:
        log.warning("No matching mask")
[ "def seam_removal_mask(self, remove_pix, mask):\n m, n = mask.shape\n output = np.zeros((m, n - 1))\n for row in range(m):\n col = remove_pix[row]\n output[row, :] = np.delete(mask[row, :], [col])\n mask = np.copy(output)\n return mask", "def delete_masked_points(*args):\n masks = [ma.getmaskarray(x) for x in args if hasattr(x, 'mask')]\n if len(masks) == 0:\n return args\n mask = reduce(ma.mask_or, masks)\n margs = []\n for x in args:\n if (not is_string_like(x)\n and iterable(x)\n and len(x) == len(mask)):\n if (hasattr(x, 'get_compressed_copy')):\n compressed_x = x.get_compressed_copy(mask)\n else:\n compressed_x = ma.masked_array(x, mask=mask).compressed()\n margs.append(compressed_x)\n else:\n margs.append(x)\n return margs", "def getMaskArray(self, signature):\n if signature in self.masklist:\n mask = self.masklist[signature]\n else:\n mask = None\n return mask", "def remove_mask(context):\n\n anim_offset = context.scene.animaide.anim_offset\n blends_action = bpy.data.actions.get('animaide')\n blends_curves = getattr(blends_action, 'fcurves', None)\n\n anim_offset.mask_in_use = False\n if blends_curves is not None and len(blends_curves) > 0:\n blends_curves.remove(blends_curves[0])\n # reset_timeline_mask(context)\n\n return", "def clearReg(address, mask, length=32):\r\n andReg(address, ~mask, length)", "def server_side_sfsa_remove_mutual_mask(FEDSUBAVG_SERVER_STORAGE, FEDSUBAVG_ROUND_STORAGE, fedsubavg_security_para_dict,\\\r\n FEDSUBAVG_DHKE):\r\n # Deal with special case: no client drops in U2\r\n if len(FEDSUBAVG_ROUND_STORAGE['U1\\U2']) == 0:\r\n return\r\n\r\n fedsubavg_all_s_shares = []\r\n for client_index in FEDSUBAVG_ROUND_STORAGE['U3']:\r\n fedsubavg_all_s_shares.append(FEDSUBAVG_SERVER_STORAGE[client_index]['drop_s_shares'])\r\n fedsubavg_s_shares_dict = {\r\n k: [d.get(k) for d in fedsubavg_all_s_shares]\r\n for k in set().union(*fedsubavg_all_s_shares) # U1\\U2\r\n }\r\n # Reconstruct and remover each mutual mask (pair of each dropped client in U1/U2 with each live client in U2)\r\n # First, reconstruct mutual mask for each dropped client, parallel is easy\r\n # NOT U3!!!!!!! 
SHOULD BE U2, those clients who send masked input y.\r\n # Cannot directly fetch this dropped client's submodel shape, use global model shape to reconstruct!\r\n U2 = FEDSUBAVG_ROUND_STORAGE['U2']\r\n U2_fedsubavg_spk_dict = {k: FEDSUBAVG_SERVER_STORAGE[k]['spk'] for k in U2}\r\n fedsubavg_s_mask_dict_list = []\r\n for client_index_drop in FEDSUBAVG_ROUND_STORAGE['U1\\U2']:\r\n fedsubavg_s_mask_dict = server_side_sfsa_reconstruct_single_mutual_mask(client_index_drop,\\\r\n fedsubavg_s_shares_dict[client_index_drop],\\\r\n FEDSUBAVG_ROUND_STORAGE['global_model_shape'], \\\r\n FEDSUBAVG_SERVER_STORAGE[client_index_drop]['ids_info'], \\\r\n U2, FEDSUBAVG_SERVER_STORAGE[client_index_drop]['mutual_mask_itemID_client_indices'],\\\r\n FEDSUBAVG_SERVER_STORAGE[client_index_drop]['mutual_mask_cateID_client_indices'],\\\r\n U2_fedsubavg_spk_dict, fedsubavg_security_para_dict, FEDSUBAVG_DHKE)\r\n fedsubavg_s_mask_dict_list.append(fedsubavg_s_mask_dict)\r\n # Second, remove mutual mask for each dropped client, parallel is hard\r\n for idx, client_index_drop in enumerate(FEDSUBAVG_ROUND_STORAGE['U1\\U2']):\r\n server_side_sfsa_remove_single_mutual_mask(fedsubavg_s_mask_dict_list[idx],\\\r\n FEDSUBAVG_SERVER_STORAGE[client_index_drop]['ids_info'], FEDSUBAVG_ROUND_STORAGE)", "def unset_flag(self, flag):\n self.mask = bytes([ord(self.mask) ^ ord(flag)])", "def abnormal_cycle_removing(signal,abnormality_index_total):\n \n if len(signal.shape)==4:\n single_abnormality_removed_signal=np.delete(signal,abnormality_index_total,3)\n elif len(signal.shape)==1:\n single_abnormality_removed_signal=np.delete(signal,abnormality_index_total,0)\n\n return single_abnormality_removed_signal", "def badPixelRemove(image, dq):\n meanImage = (np.roll(image, 1, axis = 0) + np.roll(image, -1, axis = 0) + np.roll(image, 1, axis = 1) + np.roll(image, -1, axis = 1)) #array that the values are the\n #dqbin = ['{0:016b}'.format(i) for i in dq.flat]\n #isBad = np.array([True if dqstr[-5] == '1' or dqstr[-6] == '1' else False for dqstr in dqbin]).reshape(np.shape(dq))\n image[dq == 40] = meanImage[dq == 40]\n return image", "def delete_rows(self, mask):\n assert(len(mask) == self.n_row), 'expected mask with %d rows but found %d' % (self.n_row, len(mask))\n row_subset = [ i for i in range(self.n_row) if not mask[i] ]\n for c in self.column_list:\n self.data[c] = np.array([ self.data[c][r] for r in row_subset ], dtype=self.column_type[c])\n self.n_row = len(row_subset)\n logger.info('%d rows deleted, new length is %d', sum(mask), self.n_row)", "def unmask(self):\n warnings.warn(\"Setting entire mask to False. 
Be careful..\")\n self.data.mask = False", "def unsign(self):\n self._remove_signature_data()", "def magnitude_prune(masking, mask, weight, name):\n num_remove = math.ceil(masking.name2prune_rate[name]*masking.name2nonzeros[name])\n num_zeros = masking.name2zeros[name]\n k = math.ceil(num_zeros + num_remove)\n if num_remove == 0.0: return weight.data != 0.0\n\n x, idx = torch.sort(torch.abs(weight.data.view(-1)))\n mask.data.view(-1)[idx[:k]] = 0.0\n return mask", "def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask", "def decode_mask(mask): # real signature unknown; restored from __doc__\n pass", "def remove_offsets(self, robust=None):\n self.remove_drifts(target_frame_resolution=self.size, robust=robust)", "def remove_rejects(bitmask, sky_flux):\n bad_pix = []\n for pix, bit in enumerate(bitmask):\n flags = b.decode_bitmask(b.SPPIXMASK,bit)\n for flag in flags:\n if flag == 'FULLREJECT' or flag == 'COMBINEREJ':\n bad_pix.append(pix)\n sky_flux[bad_pix] = 0\n return sky_flux", "def server_side_sfsa_remove_single_mutual_mask(fedsubavg_s_mask_dict, client_ids_info, FEDSUBAVG_ROUND_STORAGE):\r\n # Load single mutual mask\r\n weighted_delta_submodel_s_mask = fedsubavg_s_mask_dict['weighted_delta_submodel']\r\n perturbed_itemIDs_count_s_mask = fedsubavg_s_mask_dict['perturbed_itemIDs_count']\r\n perturbed_cateIDs_count_s_mask = fedsubavg_s_mask_dict['perturbed_cateIDs_count']\r\n\r\n # Load ids for mapping to ps index system\r\n perturbed_itemIDs = client_ids_info['perturbed_itemIDs']\r\n perturbed_cateIDs = client_ids_info['perturbed_cateIDs']\r\n\r\n # Load global model and count numbers in z_dict\r\n z_dict_original = FEDSUBAVG_ROUND_STORAGE['z_dict']\r\n gathered_weighted_delta_submodel = z_dict_original['gathered_weighted_delta_submodel']\r\n gathered_itemIDs_count = z_dict_original['gathered_itemIDs_count']\r\n gathered_cateIDs_count = z_dict_original['gathered_cateIDs_count']\r\n\r\n # Remove starts, Please convert client's index system to ps's global index system\r\n for layer, para_s_mask in enumerate(weighted_delta_submodel_s_mask):\r\n if layer == 0: # embedding for user id\r\n continue\r\n elif layer == 1: # embedding for item ids\r\n for client_item_index in range(len(para_s_mask)):\r\n ps_item_index = perturbed_itemIDs[client_item_index]\r\n gathered_weighted_delta_submodel[layer][ps_item_index] -= para_s_mask[client_item_index]\r\n gathered_itemIDs_count[ps_item_index] -= perturbed_itemIDs_count_s_mask[client_item_index]\r\n elif layer == 2: # embedding for cate ids\r\n for client_cate_index in range(len(para_s_mask)):\r\n ps_cate_index = perturbed_cateIDs[client_cate_index]\r\n gathered_weighted_delta_submodel[layer][ps_cate_index] -= para_s_mask[client_cate_index]\r\n gathered_cateIDs_count[ps_cate_index] -= perturbed_cateIDs_count_s_mask[client_cate_index]\r\n else:\r\n gathered_weighted_delta_submodel[layer] -= para_s_mask\r\n\r\n #Update global z_dict in FEDSUBAVG_ROUND_STORAGE\r\n FEDSUBAVG_ROUND_STORAGE['z_dict']['gathered_weighted_delta_submodel'] = gathered_weighted_delta_submodel\r\n FEDSUBAVG_ROUND_STORAGE['z_dict']['gathered_itemIDs_count'] = gathered_itemIDs_count\r\n FEDSUBAVG_ROUND_STORAGE['z_dict']['gathered_cateIDs_count'] = gathered_cateIDs_count", "def server_side_sfsa_remove_self_mask(FEDSUBAVG_SERVER_STORAGE, FEDSUBAVG_ROUND_STORAGE, fedsubavg_security_para_dict):\r\n # Gather b_shares for each live clients\r\n fedsubavg_all_b_shares = []\r\n for client_index in 
FEDSUBAVG_ROUND_STORAGE['U3']:\r\n fedsubavg_all_b_shares.append(FEDSUBAVG_SERVER_STORAGE[client_index]['live_b_shares'])\r\n fedsubavg_b_shares_dict = {\r\n k: [d.get(k) for d in fedsubavg_all_b_shares]\r\n for k in set().union(*fedsubavg_all_b_shares) # U2\r\n }\r\n\r\n # Reconstruct and remove each self mask by PRNG expanding using the seed b\r\n # NOT U3!!!!!!! SHOULD BE U2, those clients who send masked input y\r\n # First, reconstruct self mask, parallel is easy\r\n fedsubavg_b_mask_dict_list = []\r\n for client_index in FEDSUBAVG_ROUND_STORAGE['U2']:\r\n fedsubavg_b_mask_dict = server_side_sfsa_reconstruct_single_self_mask(fedsubavg_b_shares_dict[client_index], \\\r\n FEDSUBAVG_SERVER_STORAGE[client_index]['submodel_shape'], \\\r\n FEDSUBAVG_SERVER_STORAGE[client_index]['perturbed_itemIDs_size'], \\\r\n FEDSUBAVG_SERVER_STORAGE[client_index]['perturbed_cateIDs_size'], \\\r\n fedsubavg_security_para_dict)\r\n fedsubavg_b_mask_dict_list.append(fedsubavg_b_mask_dict)\r\n # Second, remove self mask from global model, parallel is hard\r\n for idx, client_index in enumerate(FEDSUBAVG_ROUND_STORAGE['U2']):\r\n server_side_sfsa_remove_single_self_mask(fedsubavg_b_mask_dict_list[idx], \\\r\n FEDSUBAVG_SERVER_STORAGE[client_index]['ids_info'], FEDSUBAVG_ROUND_STORAGE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves the static mask to a file. It uses the signatures associated with each mask to construct the filename for the output mask image.
def saveToFile(self,imageObjectList):
    virtual = imageObjectList[0].inmemory

    for key in self.masklist.keys():
        #check to see if the file already exists on disk
        filename = self.masknames[key]
        #create a new fits image with the mask array and a standard header
        #open a new header and data unit
        newHDU = fits.PrimaryHDU()
        newHDU.data = self.masklist[key]

        if virtual:
            for img in imageObjectList:
                img.saveVirtualOutputs({filename:newHDU})
        else:
            try:
                newHDU.writeto(filename, overwrite=True)
                log.info("Saving static mask to disk: %s" % filename)
            except IOError:
                log.error("Problem saving static mask file: %s to "
                          "disk!\n" % filename)
                raise IOError
[ "def save_mask(self, i: int, mask: PIL.Image.Image) -> None:\n mask.save(self.mask_filepath(i))", "def save_masked_images(self, sframe, file_name):\n blurred_image = _visualization.draw_bounding_boxes(sframe['images'], sframe['annotations'])\n try:\n os.mkdir(self.save_dir + '/anonymized_images/' + file_name + '/')\n except FileExistsError:\n pass\n for i, j in enumerate(blurred_image):\n j.save(''.join([self.save_dir, '/anonymized_images/', file_name, '/', str(i), '.jpg']))", "def save_instance_mask(stack, class_ids, image_id, dir_name = 'cedars-224/masks_instance/',\\\n mode = -1, patch = -1):\n image_id = str(image_id)\n if mode == -1:\n mat_filename = image_id.zfill(4) + '_instance'\n save_path = os.path.join(os.getcwd(), dir_name + mat_filename)\n else:\n mat_filename = image_id.zfill(4) + '_' + str(patch).zfill(4) + '_instance'\n dir_name = 'cedars-224/masks_instance_mod_' + str(mode) + '/'\n \n save_path = os.path.join(os.getcwd(), dir_name + mat_filename)\n res_dict = {'segmentation': stack, 'class_ids': class_ids}\n scipy.io.savemat(save_path, res_dict)", "def write_mask(self, file_name, format=\"fits\"):\n mask = np.short(self.to_mask())\n if format == 'fits':\n pyfits.writeto(file_name, mask, clobber=True)\n else:\n raise AttributeError(\"format not supported: %s\" % format)", "def write_mask(self, file_name, format=\"fits\"):\n mask = np.short(self.to_mask())\n if format == 'fits':\n from astropy.io import fits\n try:\n fits.writeto(file_name, mask, overwrite=True)\n except TypeError:\n fits.writeto(file_name, mask, clobber=True)\n else:\n raise AttributeError(\"format not supported: %s\" % format)", "def create_mask_data(image,maskFile):\r\n # parse input\r\n plt.gray()\r\n plt.close()\r\n message = 'Click on points at the outer edge of the disk for mask'\r\n R,center = UIF.define_outer_edge(image,'circle',message)\r\n mask1 = create_circular_mask(image,R,center)\r\n plt.figure()\r\n plt.imshow(image)\r\n image = mask_image(image,mask1)\r\n plt.figure()\r\n plt.imshow(image)\r\n message = 'Click points around the nozzle and tubing for mask'\r\n points = UIF.define_outer_edge(image,'polygon',message)\r\n mask2, temp = create_polygon_mask(image,points)\r\n # invert polygon mask and combine with circle mask\r\n mask2 = (mask2 != True)\r\n mask = (mask2 == mask1)*mask1\r\n image = mask_image(image,mask)\r\n Fun.plt_show_image(image)\r\n \r\n maskData = {}\r\n maskData['mask'] = mask\r\n maskData['diskMask'] = mask1\r\n maskData['nozzleMask'] = mask2\r\n maskData['diskCenter'] = center\r\n maskData['diskRadius'] = R\r\n maskData['nozzlePoints'] = points\r\n maskData['maskRadius'] = R\r\n \r\n with open(maskFile,'wb') as f:\r\n pkl.dump(maskData,f)\r\n \r\n return maskData", "def mask_filepath(self, i: int) -> str:\n img_fn: str = self.image_meta(i)[COCODataset._KEY_FILE_NAME]\n return os.path.join(self.masks_root, img_fn)", "def masks_to_submission(submission_filename, *images):\n with open(submission_filename, 'w') as f:\n f.write('id,prediction\\n')\n i=int(0)\n for image in images[0:]:\n i+=1\n f.writelines('{}\\n'.format(s) for s in mask_to_submission_strings(image,i))", "def masks(self, args):\n if isinstance(args.object, ImageI):\n image_id = args.object.id\n image = self._lookup(self.gateway, \"Image\", image_id)\n self.ctx.out(\"Export Masks on Image: %s\" % image.name)\n image_masks_to_zarr(image, args)", "def createMask(input=None, static_sig=4.0, group=None, editpars=False, configObj=None, **inputDict):\n\n if input is not None:\n inputDict[\"static_sig\"]=static_sig\n 
inputDict[\"group\"]=group\n inputDict[\"updatewcs\"]=False\n inputDict[\"input\"]=input\n else:\n print >> sys.stderr, \"Please supply an input image\\n\"\n raise ValueError\n\n #this accounts for a user-called init where config is not defined yet\n configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))\n if configObj is None:\n return\n\n if not editpars:\n run(configObj)", "def maskSaver(nifti_paths: List[str], masktopdir: str, torch_model):\n if next(torch_model.parameters()).device == 'cpu':\n on_cpu = True\n else:\n on_cpu = False\n\n for nifti_path in nifti_paths:\n print(nifti_path)\n maskfilename = masktopdir + nifti_path[:-7].split(\"/\")[-1] + \"_3dResnetUNet_mask.nii.gz\"\n image = nib.load(nifti_path)\n if on_cpu:\n image_array = torch.tensor(np.array(image.dataobj), dtype=torch.float32).cpu()\n mask_array = torch.tensor(np.zeros(np.shape(image_array))).cpu()\n ten24 = torch.tensor(1024.).cpu()\n negten24 = torch.tensor(-1024.).cpu()\n else:\n image_array = torch.tensor(np.array(image.dataobj), dtype=torch.float32).cuda()\n mask_array = torch.tensor(np.zeros(np.shape(image_array))).cuda()\n ten24 = torch.tensor(1024.).cuda()\n negten24 = torch.tensor(-1024.).cuda()\n\n image_array = torch.where(image_array[...] > 1024., ten24, image_array)\n image_array = torch.where(image_array[...] < -1024., negten24, image_array)\n image_array = (image_array + 1024.) / 2048.\n\n axial_slices = image_array.size()[2]\n for z_slice in range(axial_slices):\n print(str(z_slice) + \"/\" + str(axial_slices-1))\n # generate mask for slice\n model_input = fullImageInputBuilder(z_slice, image_array)\n # adding the channel dimension\n model_input = torch.unsqueeze(model_input, dim=0).cuda()\n model_input = torch.cat((model_input, model_input, model_input), dim=0)\n # adding the batch dimension\n model_input = torch.unsqueeze(model_input, dim=0)\n # writing the slice prediction to the mask\n slice_mask = torch.argmax(torch_model(model_input), dim=1)\n mask_array[:, :, z_slice] = slice_mask[0, 0, ...]\n\n mask_nifti = nib.Nifti1Image(mask_array.cpu().numpy(), image.affine)\n nib.save(mask_nifti, maskfilename)\n return None", "def apply_mask_to_datasets(self):\n if self.mask_active:\n # Load mask data\n if len(self.mask_file_path) > 0:\n ext = os.path.splitext(self.mask_file_path)[-1].lower()\n msg = \"\"\n try:\n if \".npy\" == ext:\n self.mask_data = np.load(self.mask_file_path)\n elif \".txt\" == ext:\n self.mask_data = np.loadtxt(self.mask_file_path)\n else:\n self.mask_data = np.array(Image.open(self.mask_file_path))\n\n for k in self.data_sets.keys():\n self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)\n\n # TODO: remove the following code if not needed\n # I see no reason of adding the mask image to every processed dataset\n # for k in self.img_dict.keys():\n # if 'fit' in k:\n # self.img_dict[k][\"mask\"] = self.mask_data\n\n except IOError as ex:\n msg = f\"Mask file '{self.mask_file_path}' cannot be loaded: {str(ex)}.\"\n except Exception as ex:\n msg = f\"Mask from file '{self.mask_file_path}' cannot be set: {str(ex)}.\"\n if msg:\n logger.error(msg)\n self.mask_data = None\n self.mask_active = False # Deactivate the mask\n # Now raise the exception so that proper error processing can be performed\n raise RuntimeError(msg)\n\n logger.debug(f\"Mask was successfully loaded from file '{self.mask_file_path}'\")\n else:\n # We keep the file name, but there is no need to keep the data, which is loaded from\n # file each time the 
mask is loaded. Mask is relatively small and the file can change\n # between the function calls, so it's better to load new data each time.\n self.mask_data = None\n # Now clear the mask in each dataset\n for k in self.data_sets.keys():\n self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)\n\n # TODO: remove the following code if not needed\n # There is also no reason to remove the mask image if it was not added\n # for k in self.img_dict.keys():\n # if 'fit' in k:\n # self.img_dict[k][\"mask\"] = self.mask_data\n\n logger.debug(\"Setting spatial ROI ...\")\n logger.debug(f\" ROI selection is active: {self.roi_selection_active}\")\n logger.debug(f\" Starting position: ({self.roi_row_start}, {self.roi_col_start})\")\n logger.debug(f\" Ending position (not included): ({self.roi_row_end}, {self.roi_col_end})\")\n\n try:\n for k in self.data_sets.keys():\n self.data_sets[k].set_selection(\n pt_start=(self.roi_row_start, self.roi_col_start),\n pt_end=(self.roi_row_end, self.roi_col_end),\n selection_active=self.roi_selection_active,\n )\n except Exception as ex:\n msg = f\"Spatial ROI selection can not be set: {str(ex)}\\n\"\n logger.error(msg)\n raise RuntimeError(ex)\n\n # TODO: it may be more logical to pass the data somewhere else. We leave it here for now.\n # Select raw data to for single pixel fitting.\n self.data_all = self.data_sets[self.selected_file_name].raw_data\n\n # Create Dask client to speed up processing of multiple datasets\n client = dask_client_create()\n # Run computations with the new selection and mask\n # ... for the dataset selected for processing\n self.data, self.data_total_count = self.data_sets[self.selected_file_name].get_total_spectrum_and_count(\n client=client\n )\n # ... for all datasets selected for preview except the one selected for processing.\n for key in self.data_sets.keys():\n if (key != self.selected_file_name) and self.data_sets[key].selected_for_preview:\n self.data_sets[key].update_buffers(client=client)\n client.close()", "def createFuncBrainMask(self):\n # make sure mask output dir exists\n self.createMaskOutputDir()\n\n # specify path to example func image\n exampleFunc = join(self.outputDir, 'exampleFunc.nii.gz')\n self.logger.info('creating whole brain mask from: {}'.format(exampleFunc))\n\n # run fsl bet command to create whole brain mask\n outputFile = join(self.maskOutputDir, 'wholeBrain_FUNC')\n cmdList = ['bet', exampleFunc, outputFile, '-n', '-m']\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n\n self.logger.info('created func brain mask: {}'.format(outputFile))", "def save_masked_volume(data, mask_url, path, descrip=None):\n rmask = load(mask_url)\n mask = rmask.get_data()\n shape = rmask.get_shape()\n affine = rmask.get_affine()\n save_volume(shape, path, affine, mask, data, descrip)", "def displayMasks(self):\n # figure out appropriate BG image\n if self.settings['transformMaskToFunc']:\n cmd = ['fsleyes', join(self.outputDir, 'hires_FUNC.nii.gz')]\n else:\n cmd = ['fsleyes', join(self.outputDir, 'exampleFunc.nii.gz')]\n\n # add whole brain mask, if specified\n if self.settings['createFuncBrainMask']:\n cmd.append(join(self.maskOutputDir, 'wholeBrain_FUNC_mask.nii.gz'))\n cmd.append('-cm')\n cmd.append('yellow')\n\n # add the transformed masks (weighted and binarized both), if specified\n if self.settings['transformMaskToFunc']:\n cmd.append(join(self.maskOutputDir, (self.settings['outputPrefix'] + '_FUNC_mask.nii.gz')))\n cmd.append('-cm')\n cmd.append('red')\n\n 
cmd.append(join(self.maskOutputDir, (self.settings['outputPrefix'] + '_FUNC_weighted.nii.gz')))\n cmd.append('-cm')\n cmd.append('hot')\n\n # call the fsleyes cmd\n subprocess.call(cmd)", "def markSim(home, runName):\n\n reset = os.getcwd()\n os.chdir(home + runName)\n simmarkedFolder = home + runName + '/SIMMARKED/'\n files = glob.glob('*defect*.dat')\n if not os.path.exists(simmarkedFolder):\n os.makedirs(simmarkedFolder)\n for file in files:\n fExt = file.split('.')\n fpath = fExt[:-1]\n fpathList = fpath[0].split('\\\\')\n fpathName = os.path.basename(fpath[0]).split('.')[0]\n \n #print(fpath)\n imgFile = '.'.join(fpath)+'.jpg'\n outImg = simmarkedFolder+fpathName+'SIMMARKED.jpg'\n data = np.loadtxt(file)\n locs = np.where(abs(data)==1)\n x = locs[0]\n y = locs[1]\n\n numDefects = x.shape[0]\n #print(fpathList)\n imgcv = cv2.imread(imgFile)\n for i in range(numDefects):\n imgcv = cv2.circle(imgcv, (y[i], x[i]), 2, (255,0,0), -1)\n im = Image.fromarray(imgcv)\n im.save(outImg)\n #f.write('{} {} {} {}\\r\\n'.format(y[i]-3,x[i]-3,y[i]+3,x[i]+3));\n os.chdir(reset)", "def transformMaskToFunc(self):\n # make sure mask output dir exists\n self.createMaskOutputDir()\n\n self.logger.info('transforming MNI mask to functional space')\n\n ### - brain extraction on the hi-res anat image, if specified\n outputFile = join(self.outputDir, 'hires_brain.nii.gz')\n if self.settings['skullStrip']:\n self.logger.info('skull stripping hi-res subject anatomical')\n if not exists(outputFile):\n cmdList = ['bet', self.settings['subjAnat'], outputFile, '-f', '0.35']\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n else:\n self.logger.info('using existing: {}'.format(outputFile))\n else:\n self.logger.info('copying {} to {}'.format(self.settings['subjAnat'], outputFile))\n cmdList = ['cp', self.settings['subjAnat'], outputFile]\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n\n ### register MNI standard --> hires\n self.logger.info('creating mni2hires transformation matrix')\n outputFile = join(self.outputDir, 'mni2hires.mat')\n if not exists(outputFile):\n cmdList = ['flirt', '-in', self.settings['MNI_standard'],\n '-ref', join(self.outputDir, 'hires_brain.nii.gz'),\n '-out', join(self.outputDir, 'mni_HIRES'),\n '-omat', outputFile,\n '-bins', '256', '-cost', 'corratio',\n '-searchrx', '-180', '180',\n '-searchry', '-180', '180',\n '-searchrz', '-180', '180',\n '-dof', '9', '-interp', 'trilinear']\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n else:\n self.logger.info('using existing: {}'.format(outputFile))\n\n ### register hires --> functional space\n self.logger.info('creating hires2func transformation matrix')\n outputFile = join(self.outputDir, 'hires2func.mat')\n if not exists(outputFile):\n cmdList = ['flirt', '-in', join(self.outputDir, 'hires_brain.nii.gz'),\n '-ref', join(self.outputDir, 'exampleFunc.nii.gz'),\n '-out', join(self.outputDir, 'hires_FUNC'),\n '-omat', outputFile,\n '-bins', '256', '-cost', 'corratio',\n '-searchrx', '-90', '90',\n '-searchry', '-90', '90',\n '-searchrz', '-90', '90',\n '-dof', '9', '-interp', 'trilinear']\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n else:\n self.logger.info('using existing: {}'.format(outputFile))\n\n ### concatenate mni2hires and hires2func to create mni2func transform\n self.logger.info('concatenating mni2hires and hires2func matrices')\n outputFile = join(self.outputDir, 'mni2func.mat')\n if not exists(outputFile):\n # Note that the transform after '-concat' should be 
2nd transform you want applied\n cmdList = ['convert_xfm', '-omat', outputFile,\n '-concat', join(self.outputDir, 'hires2func.mat'),\n join(self.outputDir, 'mni2hires.mat')]\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n else:\n self.logger.info('using existing: {}'.format(outputFile))\n\n ### apply mni2func transform to the chosen mask; this will create the weighted version of\n # mask in subject functional space\n self.logger.info('applying mni2func transform to {}'.format(self.settings['MNI_mask']))\n self.weightedMaskPath = join(self.maskOutputDir, (self.settings['outputPrefix'] + '_FUNC_weighted'))\n cmdList = ['flirt', '-in', self.settings['MNI_mask'],\n '-ref', join(self.outputDir, 'exampleFunc.nii.gz'),\n '-out', self.weightedMaskPath,\n '-applyxfm', '-init', join(self.outputDir, 'mni2func.mat'),\n '-interp', 'trilinear']\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)\n\n ### binarize the weighted FUNC space mask\n self.logger.info('creating binarized mask of {}'.format(self.weightedMaskPath))\n self.binarizedMaskPath = self.weightedMaskPath.replace('FUNC_weighted', 'FUNC_mask')\n cmdList = ['fslmaths', self.weightedMaskPath, '-bin', self.binarizedMaskPath]\n self.logger.debug(' '.join(cmdList))\n subprocess.call(cmdList)", "def get_png_string(mask_array):\n # Convert the new mask back to an image.\n image = PIL.Image.fromarray(mask_array.astype('uint8')).convert('RGB')\n # Save the new image to a PNG byte string.\n byte_buffer = io.BytesIO()\n image.save(byte_buffer, format='png')\n byte_buffer.seek(0)\n return byte_buffer.read()", "def saveImage(self):\r\n files = listdir(self.out_dir)\r\n filename = \"slicer-{}-output\".format(self.slice_mode)\r\n\r\n counter = 1\r\n while filename + self.props.extension in files:\r\n filename = \"slicer-\" + self.slice_mode + \"-output\" + str(counter)\r\n counter += 1\r\n\r\n fullname = path.join(self.out_dir, filename + self.props.extension)\r\n self.final_img.save(fullname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the featurized representation for a state.
def featurize_state(state):
    scaled = scaler.transform([state])
    featurized = featurizer.transform(scaled)
    #print("this featurized",featurized)
    #print("this is featurized[0]",featurized[0])
    return featurized[0]
[ "def state_description(self):\n return self._state_description", "def __str__(self):\n\t\tret = self.name + \"\\n\"\n\t\tfor k,v in self.states.items():\n\t\t\tret += v.__str__() + \"\\n\"\n\t\treturn ret", "def to_json(self):\n\n return self.belief_state.to_json()", "def state_string(self):\n return SupvisorsStates._to_string(self.state)", "def __str__(self):\r\n f = ', '.join([str(f) for f in self.features])\r\n if self._name:\r\n return '%s(%s)' %(self._name, f)\r\n # else this is just a Feature wrapper, no need to add anything\r\n return f", "def str_state(self):\n return self.IMAGE_STATES[int(self.state)]", "def state_to_string(state):\n return ('i: \\t' + str(state[2][0]) + '\\t' + str(state[2][1]) + '\\n'\n 'v: \\t' + str(state[1][0]) + '\\t'+str(state[1][1]) + '\\n'\n 'o: \\t' + str(state[0][0]) + '\\t'+str(state[0][1]) + '\\n'\n 'h: \\t' + str(state[3][0]) + '\\t'+str(state[3][1]) + '\\n'\n 'p: \\t' + str(state[4][0]) + '\\t'+str(state[4][1]) + '\\n')", "def state_model_input(cls, state: State) -> np.ndarray:\n st = state.state_as_array()\n st = st.reshape([1, 9])\n return st", "def to_json(self):\n return self.get_state()", "def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', '.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()", "def features_descriptions(self):\n return self.features.descriptions()", "def state_transform(state):\n if isinstance(state, str):\n return np.array([int(s) for s in state])\n else:\n return str(state)[1:-1].replace(' ', '')", "def to_string(self):\n string = []\n\n if isinstance(self.weights, list): # This State is belong to dur model, print name only\n string.append(\"~s\" + ' \"' + self.name + '\"')\n for ste in self.pdf:\n if ste:\n string.append(ste.to_string())\n\n if \"\" in string:\n string.remove(\"\")\n\n return \"\\n\".join(string)", "def state_shape(self):\n pass", "def to_state(self):\n return self._to_state", "def state_dict(self):\n return {'model': self.model.state_dict()}", "def show_state():\n resultjson = _run_speedify_cmd([\"state\"])\n return find_state_for_string(resultjson[\"state\"])", "def state2str(state: Union[dict, str]) -> str:\n\n if type(state) is str:\n return state\n\n return \"\".join([str(state[x]) for x in sorted(state)])", "def proof_state_embedding(self, state: predictions.ProofState):\n return jp._proof_state_embedding(self, state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return length of longest suffix of 'a' matching a prefix of 'b' that is at least 'min_length' characters long. If no such overlap exists, return 0.
def overlap(a, b, min_length=3):
    start = 0  # start all the way at the left
    while True:
        start = a.find(b[:min_length], start)  # look for b's prefix in a
        if start == -1:  # no more occurrences to right
            return 0
        # found occurrence; check for full suffix/prefix match
        if b.startswith(a[start:]):
            return len(a)-start
        start += 1  # move just past previous match
[ "def longest_common_prefix_len(a, b):\n for i, (x, y) in enumerate(zip(a, b)):\n if x != y:\n return i\n return i + 1", "def find_longest(self, s1, s2):\n min_l = min(len(s1), len(s2))\n l_common_prefix = 0\n for i in range(min_l):\n if s1[i] == s2[i]:\n l_common_prefix += 1\n else:\n break\n return s1[:l_common_prefix]", "def max_prefix(self,b):\n word1 = self.name\n word2 = b.name\n index = 1\n if (len(word1) or len(word2)) < 1:\n return 0\n while index <= len(word1):\n if word1[0:index] != word2[0:index]:\n return index\n index += 1\n return index", "def get_longest_common_substring_length(s1, s2):\n m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]\n longest, x_longest = 0, 0\n for x in range(1, 1 + len(s1)):\n for y in range(1, 1 + len(s2)):\n if s1[x - 1] == s2[y - 1]:\n m[x][y] = m[x - 1][y - 1] + 1\n if m[x][y] > longest:\n longest = m[x][y]\n x_longest = x\n else:\n m[x][y] = 0\n return s1[x_longest - longest:x_longest]", "def max_suffix_match(str1, str2):\n result = 0\n for (char1, char2) in zip(str1[::-1], str2[::-1]):\n assert char1 in \"ACGT\"\n if char1 in DEGENERACY_MAP[char2]:\n result += 1\n else:\n break\n return result", "def Levenshtein_distance(a, b):\n if len(a) == 0:\n return len(b)\n elif len(b) == 0:\n return len(a)\n elif a[0] == b[0]:\n return Levenshtein_distance(a[1:], b[1:])\n else:\n return 1 + min(Levenshtein_distance(a[1:], b), Levenshtein_distance(a, b[1:]), Levenshtein_distance(a[1:], b[1:]))", "def longest_common_prefix(string1, string2):\n\n i = 0\n while i < len(string1) and len(string2) and string1[1] == string2[i]:\n i += 1\n return string1[:i]", "def max_prefix_match(str1, str2):\n result = 0\n for (char1, char2) in zip(str1, str2):\n assert char1 in \"ACGT\"\n if char1 in DEGENERACY_MAP[char2]:\n result += 1\n else:\n break\n return result", "def LCStr(a,b):\n L = np.zeros((len(a), len(b)))\n z = 0 # Use to denote the max element in L\n ret = [] # All the common sub-string with longest length will store in ret\n for i in xrange(0,len(a)):\n for j in xrange(0,len(b)):\n if a[i] == b[j]:\n if i==0 or j==0:\n L[i][j] = 1\n else:\n L[i][j] = L[i-1][j-1] + 1\n if L[i][j] > z:\n z = L[i][j]\n ret = []\n if L[i][j] == z:\n ret.append(a[int(i-z+1):i+1])\n else:\n L[i][j] = 0\n return ret", "def maximal_suffix_match(query_seq, search_window,\n min_cap_size=8, max_cap_count=5):\n query_seq_size = len(query_seq)\n offset = query_seq_size - min_cap_size\n suffix_seq = query_seq[offset:]\n suffixes = []\n first_search = True\n while len(suffixes) <= max_cap_count:\n suffix = re.search(suffix_seq, search_window[offset:])\n if suffix is None:\n break\n elif first_search and not suffix.start():\n '''Suffix was found on first search at very beginning of window;\n it's VERY likely just an extension.'''\n return None\n else:\n extra_base_count = 0\n offset += suffix.start()\n while min_cap_size + extra_base_count < query_seq_size:\n if query_seq[-min_cap_size-extra_base_count-1] \\\n == search_window[offset-extra_base_count-1]:\n extra_base_count += 1\n else:\n break\n suffixes.append((-(extra_base_count + min_cap_size),\n offset - extra_base_count))\n offset += 1\n first_search = False\n try:\n if len(suffixes) <= max_cap_count:\n suffix = min(suffixes)\n return (suffix[1], -suffix[0])\n else:\n return None\n except ValueError:\n # No suffixes found\n return None", "def total_length(s1: str, s2: str) -> int:\n return len(s1+s2)", "def longest(s1, s2):\n new_str = ''\n for letter in s1 + s2:\n if letter not in new_str:\n new_str += letter\n return 
('').join(sorted(new_str))", "def find_longest_common_substring(x: str, y: str) -> str:\n # Check whether the input strings are None or empty\n if not x or not y:\n return ''\n\n m, n = len(x), len(y)\n # Initialization\n subproblems = [[0] * (n + 1) for i in range(m + 1)]\n # Bottom-up calculation\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n x_curr, y_curr = x[i - 1], y[j - 1]\n if x_curr == y_curr:\n subproblems[i][j] = subproblems[i - 1][j - 1] + 1\n\n # Find the maximum of the longest common suffix of possible prefixes, which\n # is exactly the longest common substring\n i_max, max_length = 0, subproblems[0][0]\n for i in range(m + 1):\n for j in range(n + 1):\n if subproblems[i][j] > max_length:\n i_max = i\n max_length = subproblems[i][j]\n return x[i_max - max_length:i_max]\n # Overall running time complexity: O(mn)", "def extend_len(start, end, min_len, min_pos=1):\n delta = np.maximum(0, min_len - (end - start + 1))\n ext = np.floor(0.5 * delta).astype(np.int)\n start_ext = np.maximum(min_pos, start - ext)\n end_ext = end + np.maximum(0, (min_len - (end - start_ext + 1)))\n assert np.all(min_len <= (end_ext - start_ext + 1))\n return (start_ext, end_ext)", "def overlap(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tmaxPossible = min(len(str1), len(str2))\n\tfor maxOver in range(maxPossible, 0, -1):\n\t\tif str1[:maxOver] == str2[len2 - maxOver:]:\n\t\t\treturn maxOver, str2, str1\n\t\telif str2[:maxOver] == str1[len1 - maxOver:]:\n\t\t\treturn maxOver, str1, str2\n\treturn 0, str1, str2", "def common_suffix(text1, text2):\n # Quick check for common null cases.\n if not text1 or not text2 or text1[-1] != text2[-1]:\n return 0\n # Binary search.\n # Performance analysis: https://neil.fraser.name/news/2007/10/09/\n pointermin = 0\n\n # TODO: move as args\n len_text1 = len(text1)\n len_text2 = len(text2)\n\n pointermax = min(len_text1, len_text2)\n pointermid = pointermax\n pointerend = 0\n\n while pointermin < pointermid:\n if (text1[-pointermid:len_text1 - pointerend] == text2[-pointermid:len(text2) - pointerend]):\n pointermin = pointermid\n pointerend = pointermin\n else:\n pointermax = pointermid\n pointermid = (pointermax - pointermin) // 2 + pointermin\n return pointermid", "def longestB(s):\n n = len(s)\n maxlen = 1\n istart = 0\n for i in range(n):\n left = i\n right = i\n while(left >= 0 and right < n and s[left] == s[right]):\n left -= 1\n right += 1\n if (right - left - 1 > maxlen):\n maxlen = right - left - 1\n istart = left + 1\n\n left = i\n right = i+1\n while(left >= 0 and right < n and s[left] == s[right]):\n left -= 1\n right += 1\n if (right - left - 1 > maxlen):\n maxlen = right - left - 1\n istart = left + 1\n return s[istart: istart+ maxlen]", "def suffix_length(oracle):\n start = len(oracle(''))\n for i in range(17):\n if len(oracle('A'*i)) != start:\n break\n return start - i", "def longest_common_subsequence(first, second):\r\n c = _lcs_length(first.sequence, second.sequence)\r\n return DNA(\"%s/%s LCS\" % (first.name, second.name), _lcs_backtrack(c, first.sequence, second.sequence))", "def find_long_substr_len(self, s):\r\n if not s:\r\n return 0\r\n\r\n l = 0\r\n n = len(s)\r\n p = float(\"-inf\")\r\n\r\n for r in range(n):\r\n # Calculate target and actual number of unique characters\r\n t = r - l + 1\r\n x = len(set(s[l:r + 1]))\r\n if x == t:\r\n p = max(p, x)\r\n else:\r\n # Increase left limit to remove repeated character\r\n l += 1\r\n\r\n if p == float(\"-inf\"):\r\n # No unique characters found\r\n return 0\r\n else:\r\n return 
p" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a custom event formatter helper.
def GetEventFormatterHelper(cls, identifier):
    identifier = identifier.lower()
    return cls._custom_formatter_helpers.get(identifier)
[ "def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)", "def RegisterEventFormatterHelper(cls, formatter_helper_class):\n identifier = formatter_helper_class.IDENTIFIER.lower()\n if identifier in cls._custom_formatter_helpers:\n raise KeyError((\n 'Custom event formatter helper already set for identifier: '\n '{0:s}.').format(formatter_helper_class.IDENTIFIER))\n\n cls._custom_formatter_helpers[identifier] = formatter_helper_class()", "def formatter(self): # type: () -> Formatter\n return self._formatter", "def event_format(etype):\n return EVENT_MAP[etype]", "def _get_formatter(self, attribute):\n\n entry = self._numeric_format.get(attribute, None)\n if isinstance(entry, string_types):\n fmt_str = '{0:' + entry + '}'\n return fmt_str.format\n elif callable(entry):\n return entry\n else:\n return str", "def get_file_formatter() -> logging.Formatter:\n return get_formatter(settings.log_format_file())", "def get_formatter(log_format: str) -> logging.Formatter:\n return logging.Formatter(log_format)", "def get_formatter(fmt) -> \"Formatter\":\n if fmt is None:\n fmt = DEFAULT_FORMATTER_NAME\n\n if fmt == \"json\":\n from .fmt_json import JsonFormatter\n\n return JsonFormatter()\n elif fmt == \"pretty\":\n from .fmt_pretty import PrettyFormatter\n\n return PrettyFormatter()\n else:\n raise ValueError(\"unknown format: {}\".format(fmt))", "def format(self, event):\n return self.getMessage(event)", "def formatter(provider: typing.Callable[..., payload.ColumnMajor]) -> typing.Callable[..., typing.Any]:\n\n @functools.wraps(provider)\n def wrapper(*args, **kwargs) -> typing.Any:\n \"\"\"Wrapped provider with custom formatting.\n\n Args:\n *args: Original args.\n **kwargs: Original kwargs.\n\n Returns:\n Formatted data.\n \"\"\"\n return self.format(provider(*args, **kwargs))\n\n return wrapper", "def template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.template_formatters)", "def _create_formatter(self, level, fmt):\n color = ''\n reset = ''\n\n if sys.stdout.isatty():\n color_name = self.config['COLOR'].get(level.upper())\n\n if color_name:\n color = getattr(colorama.Fore, color_name.upper(), '')\n\n if color:\n reset = colorama.Fore.RESET\n\n return logging.Formatter(fmt.format(color=color, reset=reset))", "def textFormatter(self):\n return self.__formatter", "def get_format_string(self) -> str:\n pass", "def format_event(self):\n # You need to create the document to which you're going to\n # create elements within. \n document = xml.dom.minidom.Document()\n element = self.create_element(document)\n formatted = element.toprettyxml(indent=\" \", encoding=\"UTF-8\")\n return formatted", "def getFormat(self): # real signature unknown; restored from __doc__\n pass", "def custom_template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.custom_template_items)", "def __init__(self, orig_formatter=None):\n self.orig_formatter = orig_formatter", "def wrapper(*args, **kwargs) -> typing.Any:\n return self.format(provider(*args, **kwargs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a custom event formatter helper. The custom event formatter helpers are identified based on their lower case identifier.
def RegisterEventFormatterHelper(cls, formatter_helper_class):
    identifier = formatter_helper_class.IDENTIFIER.lower()
    if identifier in cls._custom_formatter_helpers:
        raise KeyError((
            'Custom event formatter helper already set for identifier: '
            '{0:s}.').format(formatter_helper_class.IDENTIFIER))

    cls._custom_formatter_helpers[identifier] = formatter_helper_class()
[ "def GetEventFormatterHelper(cls, identifier):\n identifier = identifier.lower()\n return cls._custom_formatter_helpers.get(identifier)", "def RegisterEventFormatterHelpers(cls, formatter_helper_classes):\n for formatter_helper_class in formatter_helper_classes:\n cls.RegisterEventFormatterHelper(formatter_helper_class)", "def add_helper(self, helpers, fmt):\n c_helper = wformat(helpers, fmt)\n for i, helper in enumerate(c_helper.split()):\n self.c_helpers[helper] = True\n if helper not in LuaHelpers:\n raise RuntimeError(\"No such helper {}\".format(helper))\n setattr(fmt, \"hnamefunc\" + str(i),\n LuaHelpers[helper].get(\"name\", helper))", "def register_as(formatter_class, name):\n warnings.warn(\"Use behave.formatter._registry.register_as() instead.\",\n DeprecationWarning, stacklevel=2)\n _registry.register_as(name, formatter_class)", "def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)", "def register_custom_loader(self, format_name, loader_func):\n if not callable(loader_func):\n raise ValueError(\"loader_func must be callable\")\n self._loader_map[format_name] = loader_func", "def formatter(provider: typing.Callable[..., payload.ColumnMajor]) -> typing.Callable[..., typing.Any]:\n\n @functools.wraps(provider)\n def wrapper(*args, **kwargs) -> typing.Any:\n \"\"\"Wrapped provider with custom formatting.\n\n Args:\n *args: Original args.\n **kwargs: Original kwargs.\n\n Returns:\n Formatted data.\n \"\"\"\n return self.format(provider(*args, **kwargs))\n\n return wrapper", "def add_info_formatter(self, formatter):\n self.info_formatters.append(formatter)", "def add_tag(tag):\n def decorator(func: Callable[[Any], str]):\n @wraps(func)\n def wrapper(*args, **kwargs):\n return f'<{tag}>{func(*args, **kwargs)}</{tag}>'\n return wrapper\n return decorator", "def register_format(self, serializer):\n self._serializers[serializer.format] = serializer", "def register(self, field_name, func, fake=...):\r\n ...", "def custom_template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.custom_template_items)", "def register_exception_handler(exc_type, custom_error_from_exception):\n registry[exc_type] = custom_error_from_exception", "def register_format(self, format, creator):\n\n self.serializer_format_dict[format] = creator", "def register_import(task, ffilter, mime=None, native_format=0, format_name=\"\"):\n if mime:\n del_index = -1\n for i in range(0,len(import_list)):\n if import_list[i][2] == mime:\n del_index = i\n if del_index != -1:\n del import_list[del_index]\n\n import_list.append((task, ffilter, mime, native_format, format_name))\n mod2text[task.__module__] = format_name", "def register_dumper(format: 'str', module: 'str', class_: 'str', ext: 'str') -> 'None': # pylint: disable=redefined-builtin\n dumper = getattr(importlib.import_module(module), class_)\n if not issubclass(dumper, Dumper):\n raise RegistryError('dumper must be a Dumper subclass')\n\n Extractor.register_dumper(format, module, class_, ext)\n TraceFlow.register_dumper(format, module, class_, ext)\n logger.info('registered output format: %s', dumper.__name__)", "def register_dumper(cls, format: 'str', module: 'str', class_: 'str', ext: 'str') -> 'None':\n cls.__output__[format] = (module, class_, ext)", "def register_handler(handler_name, default_char):\n def _handler(e):\n return default_char, e.end\n codecs.register_error(handler_name, _handler)", "def event_format(etype):\n return EVENT_MAP[etype]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers custom event formatter helpers. The formatter helper classes are identified based on their lower case identifier.
def RegisterEventFormatterHelpers(cls, formatter_helper_classes):
    for formatter_helper_class in formatter_helper_classes:
        cls.RegisterEventFormatterHelper(formatter_helper_class)
[ "def RegisterEventFormatterHelper(cls, formatter_helper_class):\n identifier = formatter_helper_class.IDENTIFIER.lower()\n if identifier in cls._custom_formatter_helpers:\n raise KeyError((\n 'Custom event formatter helper already set for identifier: '\n '{0:s}.').format(formatter_helper_class.IDENTIFIER))\n\n cls._custom_formatter_helpers[identifier] = formatter_helper_class()", "def GetEventFormatterHelper(cls, identifier):\n identifier = identifier.lower()\n return cls._custom_formatter_helpers.get(identifier)", "def register_as(formatter_class, name):\n warnings.warn(\"Use behave.formatter._registry.register_as() instead.\",\n DeprecationWarning, stacklevel=2)\n _registry.register_as(name, formatter_class)", "def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)", "def add_helper(self, helpers, fmt):\n c_helper = wformat(helpers, fmt)\n for i, helper in enumerate(c_helper.split()):\n self.c_helpers[helper] = True\n if helper not in LuaHelpers:\n raise RuntimeError(\"No such helper {}\".format(helper))\n setattr(fmt, \"hnamefunc\" + str(i),\n LuaHelpers[helper].get(\"name\", helper))", "def event_format(etype):\n return EVENT_MAP[etype]", "def custom_template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.custom_template_items)", "def test_load_plugin_formatters(self):\n formatters_dict = self.writer.load_plugin_formatters(\"test/formatters\")\n\n self.assertEqual(len(formatters_dict), 12)\n self.assertEqual(formatters_dict[\"a\"].plugins(), ['a', 'b', 'c', 'd'])\n self.assertEqual(formatters_dict[\"f\"].plugins(), ['e', 'f', 'g', 'h'])\n self.assertEqual(formatters_dict[\"k\"].plugins(), ['i', 'j', 'k', 'l'])\n\n self.assertEqual(formatters_dict[\"a\"].format_metric('', '', '', '', '', '', ''), ('metric1Formatter', {'tag1': 'a', 'tag2': 'b'}))\n self.assertEqual(formatters_dict[\"b\"].format_metric('', '', '', '', '', '', ''), ('metric1Formatter', {'tag1': 'a', 'tag2': 'b'}))\n self.assertEqual(formatters_dict[\"c\"].format_metric('', '', '', '', '', '', ''), ('metric1Formatter', {'tag1': 'a', 'tag2': 'b'}))\n self.assertEqual(formatters_dict[\"d\"].format_metric('', '', '', '', '', '', ''), ('metric1Formatter', {'tag1': 'a', 'tag2': 'b'}))\n self.assertEqual(formatters_dict[\"e\"].format_metric('', '', '', '', '', '', ''), ('metric2Formatter', {'tag3': 'a', 'tag4': 'b'}))\n self.assertEqual(formatters_dict[\"h\"].format_metric('', '', '', '', '', '', ''), ('metric2Formatter', {'tag3': 'a', 'tag4': 'b'}))\n self.assertEqual(formatters_dict[\"k\"].format_metric('', '', '', '', '', '', ''), ('metric3Formatter', {'tag5': 'a', 'tag6': 'b'}))", "def register_datehandler(locales,parse_class,display_class):\n for lang_str in locales:\n LANG_TO_PARSER[lang_str] = parse_class\n LANG_TO_DISPLAY[lang_str] = display_class\n\n parse_class._locale = display_class._locale = GrampsLocale(lang=locales[0])", "def formats(typestr):\n def decorator(cls):\n if not inspect.isclass(cls):\n raise ValueError(\"The @formats decorator is only valid for classes\")\n attrname_format = '__formats{}'\n index = 0\n while hasattr(cls, attrname_format.format(index)):\n index += 1\n setattr(cls, attrname_format.format(index), FormatType(typestr))\n return cls\n return decorator", "def formatter(provider: typing.Callable[..., payload.ColumnMajor]) -> typing.Callable[..., typing.Any]:\n\n @functools.wraps(provider)\n def wrapper(*args, **kwargs) -> typing.Any:\n \"\"\"Wrapped provider with custom formatting.\n\n 
Args:\n *args: Original args.\n **kwargs: Original kwargs.\n\n Returns:\n Formatted data.\n \"\"\"\n return self.format(provider(*args, **kwargs))\n\n return wrapper", "def register_exception_handler(exc_type, custom_error_from_exception):\n registry[exc_type] = custom_error_from_exception", "def _build_formatters(self, formatter_enum):\n formatters = []\n formatter_names = (formatter_option.value for formatter_option in formatter_enum)\n for formatter_name in formatter_names:\n formatter = getattr(self, formatter_name)\n if formatter:\n formatters.append(formatter)\n return formatters", "def add_info_formatter(self, formatter):\n self.info_formatters.append(formatter)", "def configure_converter(converter: GenConverter):\n converter.register_unstructure_hook(\n str, lambda v: v if v.__class__ is str else v.value\n )\n converter.register_structure_hook(datetime, validate_datetime)", "def register():\n log_file.register_mime_type('.md', MIME_TYPE)\n log_file.register_mime_type('.markdown', MIME_TYPE)\n log_file.register_file_processor(MIME_TYPE, load_file, write_file)", "def register_custom_loader(self, format_name, loader_func):\n if not callable(loader_func):\n raise ValueError(\"loader_func must be callable\")\n self._loader_map[format_name] = loader_func", "def set_formatter(self, formatter):\n self.__dict__['formatter'] = formatter\n for h in self.handlers:\n h.setFormatter(self.formatter)", "def formatters(self):\n return self._build_formatters(self.Formatter)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new ``SlashCommandCategory`` with the given parameters.
def __new__(cls, slasher_application_command, deepness): if deepness > APPLICATION_COMMAND_CATEGORY_DEEPNESS_MAX: raise RuntimeError('Cannot add anymore sub-category under sub-categories.') self = object.__new__(cls) self.name = slasher_application_command.name self.description = slasher_application_command.description self._sub_commands = {} self._parent_reference = None self.default = slasher_application_command.default self._auto_completers = None self._deepness = deepness self._exception_handlers = None self._self_reference = None return self
[ "def new(ws, **kwargs):\n assert 'command_id' not in kwargs, \"New commands should not be given a command_id yet\"\n assert 'type' not in kwargs, \"New commands don't need to be passed a type parameter, one will be assigned.\"\n data = {'command_id': str(uuid.uuid1()),\n 'type': 'command'}\n data.update(kwargs)\n cmd = Command(ws, data)\n cmd.new_command = True\n return cmd", "def create_category(\n board_id: BoardID, slug: str, title: str, description: str\n) -> Category:\n board = DbBoard.query.get(board_id)\n if board is None:\n raise ValueError(f'Unknown board ID \"{board_id}\"')\n\n category = DbCategory(board.id, slug, title, description)\n board.categories.append(category)\n\n db.session.commit()\n\n return _db_entity_to_category(category)", "def create_category(category_title):\n return Category.objects.create(category_title=category_title)", "def torrents_create_category(\n self,\n name=None,\n save_path=None,\n download_path=None,\n enable_download_path=None,\n **kwargs\n ):\n # default to actually using the specified download path\n if enable_download_path is None and download_path is not None:\n enable_download_path = True\n\n data = {\n \"category\": name,\n \"savePath\": save_path,\n \"downloadPath\": download_path,\n \"downloadPathEnabled\": enable_download_path,\n }\n self._post(\n _name=APINames.Torrents, _method=\"createCategory\", data=data, **kwargs\n )", "def create_category(self, category):\n return self._post('torrents/createCategory', data={'category': category.lower()})", "def create_category(cat_name, cat_desc):\n\treturn Category.models.create(name=cat_name, desc=cat_desc)", "def create_category_parms(self, node):\n\n #parm_group\n parm_group = node.parmTemplateGroup()\n\n #fldr\n fldr = parm_group.containingFolder('categories')\n \n #lightcategories\n hou_parm_template = hou.StringParmTemplate(\"lightcategories\", \"Light Selection\", 1, default_value='*')\n hou_parm_template.setHelp(\"A space-separated list of categories. Lights in these categories will illuminate this object.\")\n #append\n parm_group.appendToFolder(fldr, hou_parm_template)\n #set in node\n node.setParmTemplateGroup(parm_group)\n\n #log\n parm = node.parm(\"lightcategories\")\n parm_name = parm.name()\n parm_value = parm.eval()\n print('Added parm. {0} - {1}'.format(parm_name, parm_value))\n\n #fldr\n fldr = parm_group.containingFolder('categories')\n\n #reflectcategories\n hou_parm_template = hou.StringParmTemplate(\"reflectcategories\", \"Reflection Selection\", 1, default_value='*')\n hou_parm_template.setHelp(\"A space-separated list of categories. Objects in these categories will reflect in this object.\")\n #append\n parm_group.appendToFolder(fldr, hou_parm_template)\n #set in node\n node.setParmTemplateGroup(parm_group)\n\n #log\n parm = node.parm(\"reflectcategories\")\n parm_name = parm.name()\n parm_value = parm.eval()\n print('Added parm. {0} - {1}'.format(parm_name, parm_value))\n\n #fldr\n fldr = parm_group.containingFolder('categories')\n\n #refractcategories\n hou_parm_template = hou.StringParmTemplate(\"refractcategories\", \"Refraction Selection\", 1, default_value='*')\n hou_parm_template.setHelp(\"A space-separated list of categories. Objects in these categories will be visible in refraction rays.\")\n #append\n parm_group.appendToFolder(fldr, hou_parm_template)\n #set in node\n node.setParmTemplateGroup(parm_group)\n\n #log\n parm = node.parm(\"refractcategories\")\n parm_name = parm.name()\n parm_value = parm.eval()\n print('Added parm. 
{0} - {1}'.format(parm_name, parm_value))", "def from_strings(*args):\n strs = []\n for arg in args:\n strs.append(arg)\n cptr = pyniNVCategory.n_createCategoryFromNVStrings(strs)\n return nvcategory(cptr)", "def _create(self, *args, **kwargs):\n details = self.inspect()\n\n config = ConfigDict(image_id=self._id, **kwargs)\n config[\"command\"] = details.config.get(\"cmd\")\n config[\"env\"] = self._split_token(details.config.get(\"env\"))\n config[\"image\"] = copy.deepcopy(details.repotags[0]) # Falls to https://github.com/containers/python-podman/issues/65\n config[\"labels\"] = copy.deepcopy(details.labels)\n config[\"args\"] = [config[\"image\"], *config[\"command\"]]\n\n logging.debug(\"Image %s: create config: %s\", self._id, config)\n with self._client() as podman:\n id_ = podman.CreateContainer(config)[\"container\"]\n cntr = podman.GetContainer(id_)\n return Container(self._client, id_, cntr[\"container\"])", "def construct_channel(self, *args, **kwargs):\n channel = self.get_channel(*args, **kwargs) # Create ChannelNode from data in self.channel_info\n\n scrape_directory(channel, FOLDER)\n\n raise_for_invalid_channel(channel) # Check for errors in channel construction\n\n return channel", "def createCategories(context):\n # Only run step if a flag file is present\n filenames = context.readDataFile('mars_categories_container.txt')\n if filenames is None:\n return\n #filenames = [f\n # for f in filenames.split('\\n')\n # if f and not f.startswith('#') ]\n site = context.getSite()\n if not CAT_CONTAINER in site.objectIds():\n _createObjectByType('Categories Container',\n site,\n id=CAT_CONTAINER,\n title='Mars Categories',\n excludeFromNav=True)\n container = getattr(site, CAT_CONTAINER)\n publish_all(container)\n #for filename in filenames:\n # datafile = context.readDataFile('categories/'+filename)\n # container.importCatsFromText(datafile, filename)", "def create(cls, **kwargs):", "def test__ChannelMetadataGuildCategory__new__0():\n parent_id = 202209170029\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202209170030, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n channel_metadata = ChannelMetadataGuildCategory(\n parent_id = parent_id,\n name = name,\n permission_overwrites = permission_overwrites,\n position = position,\n )\n _assert_fields_set(channel_metadata)\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)", "def add_slash_command(\n self,\n cmd,\n name: str = None,\n description: str = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n connector: dict = None,\n has_subcommands: bool = False,\n ):\n name = name or cmd.__name__\n name = name.lower()\n guild_ids = guild_ids if guild_ids else []\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description, connector)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"connector\": 
connector or {},\n \"has_subcommands\": has_subcommands,\n }\n obj = model.CommandObject(name, _cmd)\n self.commands[name] = obj\n self.logger.debug(f\"Added command `{name}`\")\n return obj", "def registerCommand(self,name,alias,access_lvl,msg_types_list,category,args,short_help,long_help=None):\r\n\t\tkc = category.lower()\r\n\t\tk = name.lower()\r\n\t\tid = -1\r\n\t\t\r\n\t\t\r\n\t\tif args is None:\r\n\t\t\targs = \"\"\r\n\t\tif category is None:\r\n\t\t\tcategory = \"None\"\r\n\t\tif alias is None:\r\n\t\t\talias = \"\"\r\n\t\t\r\n\t\tif k in self.__cmd_dict:\r\n\t\t\tself.__log(INFO,\"Attempt to register already existing command:\", name)\r\n\t\telse:\r\n\t\t\tid = len(self.__cmd_dict)\r\n\t\t\tnc = Command(id,name,alias,access_lvl,msg_types_list,category,args,short_help,long_help)\r\n\t\t\tself.__cmd_dict[k] = nc\r\n\t\t\t\r\n\t\t\tself.__max_cmd_len = max(len(nc.name)+len(nc.alias)+len(nc.args)+4,self.__max_cmd_len) \r\n\t\t\t\r\n\t\t\tif alias != None and alias != \"\":\r\n\t\t\t\tka = alias.lower()\r\n\t\t\t\tif ka in self.__alias_dict:\r\n\t\t\t\t\tself.__log(INFO,\"Attempt to register already existing alias:\", ka)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__alias_dict[ka] = nc\r\n\t\t\t\t\r\n\t\t\tif kc in self.__category_dict:\r\n\t\t\t\tself.__category_dict[kc].append(nc)\r\n\t\t\telse:\r\n\t\t\t\tself.__category_dict[kc] = [nc]\r\n\t\t\t\r\n\t\treturn id", "def to_category(self, resp): \n category = Category()\n if 'id' in resp:\n category.set_id(resp['id'])\n if 'name' in resp:\n category.set_name(resp['name'])\n return category", "def _create_callback(cls, resource_json, user):\n category = cls(data_dict=resource_json[0], user=user)\n user.add_category(category)\n return category", "def create_command(args):\n if args.subparser_name == \"analyze\":\n cmd = instarepo.commands.analyze.AnalyzeCommand(args)\n elif args.subparser_name == \"fix\":\n cmd = instarepo.commands.fix.FixCommand(args)\n elif args.subparser_name == \"list\":\n cmd = instarepo.commands.list.ListCommand(args)\n elif args.subparser_name == \"clone\":\n cmd = instarepo.commands.clone.CloneCommand(args)\n elif args.subparser_name == \"login\":\n cmd = instarepo.commands.login.LoginCommand(args)\n elif args.subparser_name == \"logout\":\n cmd = instarepo.commands.logout.LogoutCommand(args)\n else:\n raise ValueError(f\"Sub-parser {args.subparser_name} is not implemented\")\n return cmd", "def create_channel(channelName=None, channelStorage=None, retentionPeriod=None, tags=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls the respective auto completion function of the command. This method is a coroutine.
async def invoke_auto_completion(self, client, interaction_event, auto_complete_option): auto_complete_option_type = auto_complete_option.type if ( (auto_complete_option_type is APPLICATION_COMMAND_OPTION_TYPE_SUB_COMMAND) or (auto_complete_option_type is APPLICATION_COMMAND_OPTION_TYPE_SUB_COMMAND_CATEGORY) ): options = auto_complete_option.options if (options is not None): option = options[0] sub_commands = self._sub_commands if (sub_commands is not None): try: sub_command = sub_commands[option.name] except KeyError: pass else: await sub_command.invoke_auto_completion(client, interaction_event, option)
[ "def ctxCompletion():\n pass", "def ctxCompletion(*args, **kwargs):\n\n pass", "def completion(ctx, shell=None):\n completer = pycomplete.Completer(ctx)\n print(completer.render(shell))", "def auto_command(self, cmd, uuser, cchannel, suggesting=True):\n if cmd==\"\":\n return cmd\n i=0\n n=0\n it=\"\"\n if cmd in self.kcs_:\n it=cmd\n return it\n else:\n while ( n < len(self.kcs_)):\n kc=self.kcs_[n] \n n+=1\n if kc[:len(cmd)]==string.lower(cmd):\n hit=kc\n i+=1\n if i >= 2: #not unique\n it = floodProtect.suggest_command(self, cmd, cchannel, False)\n break\n if i == 1:\n return str(hit)\n else:\n #too long\n if i == 0:\n it = floodProtect.suggest_command(self, cmd, cchannel, True)\n self.logger.debug(\"command cropped\"+str(it)+ \"returning \"+cmd)\n \"\"\" return the origin command if cropped to nothing \"\"\"\n return cmd if str(it) =='[]' else ''\n return it\n return it", "def autocomplete(self):\n if self.completion_env_var_name not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword-1]\n except IndexError:\n current = ''\n cmd_names = self.get_commands().keys()\n\n if current:\n self.stdout.write(unicode(' '.join(\n [name for name in cmd_names if name.startswith(current)])))\n\n sys.exit(1)", "def process_dynamic_completion(self, completion):\n if len(completion.split()) > 1:\n completion = '\\\"' + completion + '\\\"'\n\n if self.validate_completion(completion):\n yield Completion(completion, -len(self.unfinished_word))", "def get_completions(self, document, _):\n word_before_cursor = document.get_word_before_cursor(WORD=True)\n words = self.text_utils.get_tokens(document.text)\n commands = []\n if len(words) == 0:\n return commands\n if self.completing_command(words, word_before_cursor):\n commands = ['gh']\n else:\n if 'gh' not in words:\n return commands\n if self.completing_subcommand(words, word_before_cursor):\n commands = list(SUBCOMMANDS.keys())\n else:\n if self.completing_arg(words, word_before_cursor):\n commands = self.arg_completions(words, word_before_cursor)\n else:\n commands = self.completing_subcommand_option(\n words,\n word_before_cursor)\n completions = self.text_utils.find_matches(\n word_before_cursor, commands, fuzzy=self.fuzzy_match)\n return completions", "def _add_autocomplete_function(self, parameter_names, function):\n if isinstance(function, SlashCommandParameterAutoCompleter):\n function = function._command\n \n auto_completer = SlashCommandParameterAutoCompleter(\n function,\n parameter_names,\n self._deepness,\n self,\n )\n \n auto_completers = self._auto_completers\n if (auto_completers is None):\n auto_completers = []\n self._auto_completers = auto_completers\n \n auto_completers.append(auto_completer)\n \n resolved = 0\n sub_commands = self._sub_commands\n for sub_command in sub_commands.values():\n resolved += sub_command._try_resolve_auto_completer(auto_completer)\n \n if resolved:\n _reset_parent_schema(self)\n \n return auto_completer", "def gen_cmd_and_param_completions(self):\n # if the user inputs space or 'az', provide recommendation instead of\n # default completion when recommender is enabled\n has_user_input = self.current_command or self.unfinished_word.strip()\n if not has_user_input and self.shell_ctx.recommender.enabled:\n return\n if self.complete_command:\n for param in self.command_param_info.get(self.current_command, []):\n if self.validate_param_completion(param, self.leftover_args):\n yield self.yield_param_completion(param, 
self.unfinished_word)\n elif not self.leftover_args:\n for child_command in self.subtree.children:\n if self.validate_completion(child_command):\n full_command = f'{self.current_command} {child_command}'.strip()\n yield Completion(child_command, -len(self.unfinished_word),\n display_meta=self.command_description.get(full_command))", "def autocomplete(self, str):\n return list(cognipy_call(self._uid, \"AutoComplete\", str))", "def autoCompleteShow(self, command):\n names = self.interp.getAutoCompleteList(command,\n includeMagic=self.autoCompleteIncludeMagic,\n includeSingle=self.autoCompleteIncludeSingle,\n includeDouble=self.autoCompleteIncludeDouble)\n if not self.autoCompleteWxMethods:\n root = introspect.getRoot(command, terminator='.')\n try:\n # we have to use locals, right?\n #print root\n object = eval(root, self.interp.locals)\n #print object\n # only filter attribute names of wxPython objects\n if isinstance(object, wx.Object):\n names.remove('this')\n names.remove('thisown')\n names = [name for name in names if name[0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']\n except:\n # what is the proper thing to do here?\n pass\n if names:\n options = ' '.join(names)\n offset = 0\n self.AutoCompShow(offset, options)", "def handle_completed_command(self, command: CompletedCommandType) -> None:\n pass", "def get_completions(self, info):\n return []", "async def create_completion(loop: BaseEventLoop, prompt: str, temperature: float,\n max_tokens: int, stop: Union[str, List[str]], engine=\"davinci\") -> openai.Completion:\n return await loop.run_in_executor(None, sync_create_completion, prompt, temperature, max_tokens, stop, engine)", "def getCompletions(self, cmd: unicode) -> List[ghidra.app.plugin.core.console.CodeCompletion]:\n ...", "def auto_tweet_on_completion(self, auto_tweet_on_completion):\n\n self._auto_tweet_on_completion = auto_tweet_on_completion", "def objective_completion(self, objective_completion):\n\n self._objective_completion = objective_completion", "def sync_create_completion(prompt: str, temperature: float,\n max_tokens: int, stop: Union[str, List[str]], engine=\"davinci\") -> openai.Completion:\n return openai.Completion.create(engine=engine, prompt=prompt, temperature=temperature, max_tokens=max_tokens,\n stop=stop)", "def autocomplete(self, cursor_byte, prev_text=\"\"):\n # Autocomplete globals, variables in scope, functions, or methods\n # Identify list of globals, functions or methods (regardless of code loc)\n global_vars = self.parse_globals()\n # global_vars = self.parse_with_query(queries.globals_query)\n # functions = self.parse_with_query(queries.functions_query)\n imports = self.parse_with_query(queries.imports_query)\n functions = self.parse_functions()\n\n class_vars, func_vars, funcs_in_scope = self.parse_vars_funcs_in_scope(cursor_byte)\n suggestions = []\n line_len = len(prev_text)\n prev_token = prev_text.split()[-1] if line_len > 0 else ''\n # When trailing chars are 'self.' only add class vars and funcs\n if line_len >= 5 and 'self.' in prev_token:\n suggestions.extend(class_vars)\n suggestions.extend(funcs_in_scope)\n prev_token = prev_token.split('.')[-1]\n else:\n for l in [global_vars, imports, func_vars, functions]:\n suggestions.extend(l)\n\n # Filter for text in the last line\n suggestions = [s for s in suggestions if s.startswith(prev_token)]\n suggestions = list(set(suggestions))\n \n return suggestions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the slash command category as an application command option.
def as_option(self): sub_commands = self._sub_commands if sub_commands: options = [sub_command.as_option() for sub_command in sub_commands.values()] else: options = None return ApplicationCommandOption( self.name, self.description, ApplicationCommandOptionType.sub_command_group, options = options, default = self.default, )
[ "def getCategory(cmd):\n\tf = lookForCommand(cmd)\n\tif f is None:\n\t\treturn (None, None)\n\n\ttry:\n\t\tcategory = re.findall( r\"# (\\S+)\", f.__doc__ )[0]\n\texcept:\n\t\tcategory = \"other\"\n\t\n\treturn (category, category_description[category][1])", "def _category_key(command: commands.Command) -> str:\r\n\r\n if not command.cog:\r\n return '**\\u200bNo Category:**'\r\n\r\n with suppress(AttributeError):\r\n if command.cog.category:\r\n return f'**{command.cog.category}**'\r\n return f'**{command.cog_name}**'", "def get_command(self):\n if self.app_path is None:\n self.log_error(self.app_name + \": No app path specified. \\\n You must use a subclass\")\n return None\n\n cmd = '{} -v {} '.format(self.app_path, self.c_dict['VERBOSITY'])\n\n for args in self.args:\n cmd += args + \" \"\n\n if len(self.infiles) == 0:\n self.log_error(self.app_name+\": No input filenames specified\")\n return None\n\n for infile in self.infiles:\n cmd += infile + \" \"\n\n if self.param != \"\":\n cmd += self.param + \" \"\n\n for obs_file in self.point_obs_files:\n cmd += \"-point_obs \" + obs_file + \" \"\n\n for obs_file in self.grid_obs_files:\n cmd += \"-grid_obs \" + obs_file + \" \"\n\n if self.outdir == \"\":\n self.log_error(self.app_name+\": No output directory specified\")\n return None\n\n cmd += '-outdir {}'.format(self.outdir)\n return cmd", "def GetCommandListForCategory(category):\r\n global RootCommandList\r\n \r\n commandList = RootCommandList\r\n if category is not None and category != '':\r\n for category in category.split('/'):\r\n if category in commandList:\r\n commandList = commandList[category]\r\n else:\r\n newCommandList = CommandList(category)\r\n commandList.addCommand(category, newCommandList)\r\n commandList = newCommandList\r\n return commandList", "def get_cmd(self):\n\t\tif self.cmd is not None:\n\t\t\treturn self.cmd\n\t\tcmd = \"/system/bin/sh /system/bin/am \"\n\t\tif self.prefix:\n\t\t\tcmd += self.prefix\n\t\tif self.action is not None:\n\t\t\tcmd += \" -a \" + self.action\n\t\tif self.data_uri is not None:\n\t\t\tcmd += \" -d \" + self.data_uri\n\t\tif self.mime_type is not None:\n\t\t\tcmd += \" -t \" + self.mime_type\n\t\tif self.category is not None:\n\t\t\tcmd += \" -c \" + self.category\n\t\tif self.component is not None:\n\t\t\tcmd += \" -n \" + self.component\n\t\tif self.flag is not None:\n\t\t\tcmd += \" -f \" + self.flag\n\t\tif self.extra_keys:\n\t\t\tfor key in self.extra_keys:\n\t\t\t\tcmd += \" --esn '%s'\" % key\n\t\tif self.extra_string:\n\t\t\tfor key in self.extra_string.keys():\n\t\t\t\tcmd += \" -e '%s' '%s'\" % (key, self.extra_string[key])\n\t\tif self.extra_boolean:\n\t\t\tfor key in self.extra_boolean.keys():\n\t\t\t\tcmd += \" -ez '%s' %s\" % (key, self.extra_boolean[key])\n\t\tif self.extra_int:\n\t\t\tfor key in self.extra_int.keys():\n\t\t\t\tcmd += \" -ei '%s' %s\" % (key, self.extra_int[key])\n\t\tif self.extra_long:\n\t\t\tfor key in self.extra_long.keys():\n\t\t\t\tcmd += \" -el '%s' %s\" % (key, self.extra_long[key])\n\t\tif self.extra_float:\n\t\t\tfor key in self.extra_float.keys():\n\t\t\t\tcmd += \" -ef '%s' %s\" % (key, self.extra_float[key])\n\t\tif self.extra_uri:\n\t\t\tfor key in self.extra_uri.keys():\n\t\t\t\tcmd += \" -eu '%s' '%s'\" % (key, self.extra_uri[key])\n\t\tif self.extra_component:\n\t\t\tfor key in self.extra_component.keys():\n\t\t\t\tcmd += \" -ecn '%s' %s\" % (key, self.extra_component[key])\n\t\tif self.extra_array_int:\n\t\t\tfor key in self.extra_array_int.keys():\n\t\t\t\tcmd += \" -eia '%s' %s\" % 
(key, \",\".join(self.extra_array_int[key]))\n\t\tif self.extra_array_long:\n\t\t\tfor key in self.extra_array_long.keys():\n\t\t\t\tcmd += \" -ela '%s' %s\" % (key, \",\".join(self.extra_array_long[key]))\n\t\tif self.extra_array_float:\n\t\t\tfor key in self.extra_array_float.keys():\n\t\t\t\tcmd += \" -efa '%s' %s\" % (key, \",\".join(self.extra_array_float[key]))\n\t\tif self.flags:\n\t\t\tcmd += \" \" + \" \".join(self.flags)\n\t\tif self.suffix:\n\t\t\tcmd += \" \" + self.suffix\n\t\tself.cmd = cmd\n\t\treturn self.cmd", "def help_cat(self):\n print(help_msg.cmds['cat'])", "def __new__(cls, slasher_application_command, deepness):\n if deepness > APPLICATION_COMMAND_CATEGORY_DEEPNESS_MAX:\n raise RuntimeError('Cannot add anymore sub-category under sub-categories.')\n \n self = object.__new__(cls)\n self.name = slasher_application_command.name\n self.description = slasher_application_command.description\n self._sub_commands = {}\n self._parent_reference = None\n self.default = slasher_application_command.default\n self._auto_completers = None\n self._deepness = deepness\n self._exception_handlers = None\n self._self_reference = None\n \n return self", "def to_option(attr):\n return '--%s' % attr.lower().replace('_', '-')", "def get_command(self, ctx, cmd_name):\n if cmd_name == \"ls\":\n cmd_name = \"list\"\n return click.Group.get_command(self, ctx, cmd_name)", "def GetCommandLineOptions(self):\n return self.args_", "async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n sub_name = sub[\"name\"]\n if sub_name not in base:\n return\n ctx.subcommand_name = sub_name\n sub_opts = sub[\"options\"] if \"options\" in sub else []\n for x in sub_opts:\n if \"options\" in x or \"value\" not in x:\n sub_group = x[\"name\"]\n if sub_group not in base[sub_name]:\n return\n ctx.subcommand_group = sub_group\n selected = base[sub_name][sub_group]\n\n # This is to temporarily fix Issue #97, that on Android device\n # does not give option type from API.\n temporary_auto_convert = {}\n for n in selected.options:\n temporary_auto_convert[n[\"name\"].lower()] = n[\"type\"]\n\n args = (\n await self.process_options(\n ctx.guild,\n x[\"options\"],\n selected.connector,\n temporary_auto_convert,\n )\n if \"options\" in x\n else {}\n )\n self._discord.dispatch(\"slash_command\", ctx)\n await self.invoke_command(selected, ctx, args)\n return\n selected = base[sub_name]\n\n # This is to temporarily fix Issue #97, that on Android device\n # does not give option type from API.\n temporary_auto_convert = {}\n for n in selected.options:\n temporary_auto_convert[n[\"name\"].lower()] = n[\"type\"]\n\n args = (\n await self.process_options(\n ctx.guild, sub_opts, selected.connector, temporary_auto_convert\n )\n if \"options\" in sub\n else {}\n )\n self._discord.dispatch(\"slash_command\", ctx)\n await self.invoke_command(selected, ctx, args)", "def categories(self) -> Dict[str, 'AbsCommand']:\n categories = {}\n for cmd in self.commands:\n categories.setdefault(cmd.category, [])\n categories[cmd.category].append(cmd)\n return categories", "def application_mailbox_command(self):\n return self._application_mailbox_command", "def get_command(self):\n return 'item'", "def _convert_command( self, human ):\n \n shortcut_map = {\n 'm':'mute',\n 'p':'pause',\n 's':'stop',\n 'pn':'PlayNext',\n 'pp':'PlayPrev'\n }\n \n if human in shortcut_map.keys():\n 
return shortcut_map[human]\n \n # Volume\n if human[:3] == 'vol':\n return 'SetVolume(%s)' % human[4:]\n \n return human", "def advapi32_GetManagedApplicationCategories(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwReserved\", \"pAppCategory\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _implied_cmd(self, cmdline: str) -> Optional[str]:\n try:\n results = parser.CommandParser().parse_all(cmdline)\n except cmdexc.NoSuchCommandError:\n return None\n\n result = results[0]\n if result.cmd.name != \"set-cmd-text\":\n return cmdline\n if not result.args:\n return None # doesn't look like this sets a command\n *flags, cmd = result.args\n if \"-a\" in flags or \"--append\" in flags or not cmd.startswith(\":\"):\n return None # doesn't look like this sets a command\n return cmd.lstrip(\":\")", "def get_command(self, ctx, name):\n return self.aliases.get(name)", "def get_launch_cmd():\n if get_os_name() == \"osx\":\n return \"/Applications/Miro Video Converter.app\"\n else:\n print \"no clue\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers an autocomplete function.
def _add_autocomplete_function(self, parameter_names, function): if isinstance(function, SlashCommandParameterAutoCompleter): function = function._command auto_completer = SlashCommandParameterAutoCompleter( function, parameter_names, self._deepness, self, ) auto_completers = self._auto_completers if (auto_completers is None): auto_completers = [] self._auto_completers = auto_completers auto_completers.append(auto_completer) resolved = 0 sub_commands = self._sub_commands for sub_command in sub_commands.values(): resolved += sub_command._try_resolve_auto_completer(auto_completer) if resolved: _reset_parent_schema(self) return auto_completer
[ "def _register_autocomplete(self, autocomplete):\n self[autocomplete.__name__] = autocomplete", "def py_autocomplete():\n\n return get_all_users()", "def enable_autocomplete(self, ):\n return self._set_one_attribute(self.AttributeNames.AUTOCOMPLETE, 'on')", "def autocomplete(self, str):\n return list(cognipy_call(self._uid, \"AutoComplete\", str))", "def autocomplete(self, cursor_byte, prev_text=\"\"):\n # Autocomplete globals, variables in scope, functions, or methods\n # Identify list of globals, functions or methods (regardless of code loc)\n global_vars = self.parse_globals()\n # global_vars = self.parse_with_query(queries.globals_query)\n # functions = self.parse_with_query(queries.functions_query)\n imports = self.parse_with_query(queries.imports_query)\n functions = self.parse_functions()\n\n class_vars, func_vars, funcs_in_scope = self.parse_vars_funcs_in_scope(cursor_byte)\n suggestions = []\n line_len = len(prev_text)\n prev_token = prev_text.split()[-1] if line_len > 0 else ''\n # When trailing chars are 'self.' only add class vars and funcs\n if line_len >= 5 and 'self.' in prev_token:\n suggestions.extend(class_vars)\n suggestions.extend(funcs_in_scope)\n prev_token = prev_token.split('.')[-1]\n else:\n for l in [global_vars, imports, func_vars, functions]:\n suggestions.extend(l)\n\n # Filter for text in the last line\n suggestions = [s for s in suggestions if s.startswith(prev_token)]\n suggestions = list(set(suggestions))\n \n return suggestions", "def register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func", "def set_completer(function=None):\r\n\r\n global _completer_function\r\n _completer_function = function\r\n\r\n def complete_handler(buffer, cursor, candidates):\r\n start = _get_delimited(buffer, cursor)[0]\r\n delimited = buffer[start:cursor]\r\n for state in xrange(100): # TODO arbitrary, what's the number used by gnu readline?\r\n completion = None\r\n try:\r\n completion = function(delimited, state)\r\n except:\r\n pass\r\n if completion:\r\n candidates.add(completion)\r\n else:\r\n break\r\n return start\r\n\r\n _reader.addCompletor(complete_handler)", "def _add_suggestion_string(self, *args, **kwargs):\r\n self.suggestions.add(GenericTranslation(*args, **kwargs))", "def add_function(self, function):\n self.functions.append(function)", "def register_hook(self, hook, function):\n if hook in self.hooks:\n self.hooks[hook].append(function)\n else:\n self.hooks[hook] = [ function ]", "def trigger_autocomplete(self, selector):\n self.browser.execute_script(\n '$(\"' + selector + '\").autocomplete(\"search\");'\n )", "def test_include():\n from autocomplete import AutoCompleter", "def add_function(self, func):\n self._conf['functions'].append(func)", "def test_autocomplete_defaults(self):\n st.text_input(\"foo\")\n proto = self.get_delta_from_queue().new_element.text_input\n self.assertEqual(\"\", proto.autocomplete)\n\n st.text_input(\"password\", type=\"password\")\n proto = self.get_delta_from_queue().new_element.text_input\n self.assertEqual(\"new-password\", proto.autocomplete)", "def search_aliases(connection,searchterm):\n return", "def register(self, field_name, func, fake=...):\r\n ...", "def register(name):\n\n def add_to_dict(func):\n ATTACKS[name] = func\n return func\n\n return add_to_dict", "def autoCompleteShow(self, command):\n names = self.interp.getAutoCompleteList(command,\n includeMagic=self.autoCompleteIncludeMagic,\n includeSingle=self.autoCompleteIncludeSingle,\n 
includeDouble=self.autoCompleteIncludeDouble)\n if not self.autoCompleteWxMethods:\n root = introspect.getRoot(command, terminator='.')\n try:\n # we have to use locals, right?\n #print root\n object = eval(root, self.interp.locals)\n #print object\n # only filter attribute names of wxPython objects\n if isinstance(object, wx.Object):\n names.remove('this')\n names.remove('thisown')\n names = [name for name in names if name[0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']\n except:\n # what is the proper thing to do here?\n pass\n if names:\n options = ' '.join(names)\n offset = 0\n self.AutoCompShow(offset, options)", "def setAutoFileComplete(self, flag):\n if flag == True:\n comp = filecompleter.Completer()\n readline.set_completer_delims(' \\t\\n;')\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(comp.complete)\n else:\n readline.set_completer_delims('')\n readline.parse_and_bind(\"\")\n readline.set_completer(None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Margin on lines = SP@(100 - %discount) - CP@; Profit Margin % on lines = (SP@(100 - %discount) - CP@) / SP@; Profit Margin % TOTAL = sum over all lines.
def _crea8s_product_margin_percent(self, cursor, user, ids, field_name, arg, context=None): result = {} for sale in self.browse(cursor, user, ids, context=context): result[sale.id] = 0.0 for line in sale.order_line: result[sale.id] += line.crea8s_profit_margin_percent or 0.0 return result
[ "def _compute_margin(self, cursor, user, ids, field_name, arg, context = None):\n logger = logging.getLogger('product_standard_margin')\n if context is None:\n context = {}\n res = {}\n if not ids:\n return res\n for product in ids:\n res[product] = {'margin_absolute': 0, 'margin_relative': 0}\n for product in self.browse(cursor, user, ids):\n cost = product.cost_price\n sale = self._amount_tax_excluded(cursor, user, [product.id], context=context)[product.id]\n # sale = product.list_price\n res[product.id]['standard_margin'] = sale - cost\n if sale == 0:\n logger.debug(\"Sale price for product ID %d is 0, cannot compute margin rate...\", product.id)\n res[product.id]['standard_margin_rate'] = 999.\n else:\n res[product.id]['standard_margin_rate'] = (sale - cost) / sale * 100\n return res", "def discount_line_total(cartitem, discount):\n if config_value('TAX', 'DEFAULT_VIEW_TAX'):\n return taxed_discount_line_total(cartitem, discount)\n else:\n return untaxed_discount_line_total(cartitem, discount)", "def _compute_amount(self):\n for line in self:\n price = 0.0\n discount_value = 0\n if line.discount_type == 'percent':\n price = line.price_unit * (1 - (line.discount_rate or 0.0) / 100.0)\n #line.discount = ((line.price_unit * line.product_qty) * line.discount_rate)/100\n else:\n price = line.price_unit - line.discount_rate\n #line.discount = line.product_qty * line.discount_rate\n\n taxes = line.taxes_id.compute_all(price, line.order_id.currency_id, line.product_qty,\n product=line.product_id, partner=line.order_id.partner_id)\n\n line.update({\n 'price_tax': taxes['total_included'] - taxes['total_excluded'],\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })", "def taxed_discount_line_total(cartitem, discount):\n price = untaxed_discount_line_total(cartitem, discount)\n taxer = satchmo_tax._get_taxprocessor()\n price = price + taxer.by_price(cartitem.product.taxClass, price)\n\n return price", "def taxed_discount_cart_total(cart, discount):\n total = Decimal('0.00')\n\n for item in cart:\n total += taxed_discount_line_total(item, discount)\n\n return total", "def base_order_total(order: \"Order\", lines: Iterable[\"OrderLine\"]) -> Money:\n currency = order.currency\n subtotal = _base_order_subtotal(order, lines)\n shipping_price = order.base_shipping_price\n order_discounts = order.discounts.all()\n order_discounts_to_update = []\n for order_discount in order_discounts:\n subtotal_before_discount = subtotal\n shipping_price_before_discount = shipping_price\n if order_discount.type == OrderDiscountType.VOUCHER:\n subtotal = apply_discount_to_value(\n value=order_discount.value,\n value_type=order_discount.value_type,\n currency=currency,\n price_to_discount=subtotal,\n )\n elif order_discount.value_type == DiscountValueType.PERCENTAGE:\n subtotal = apply_discount_to_value(\n value=order_discount.value,\n value_type=order_discount.value_type,\n currency=currency,\n price_to_discount=subtotal,\n )\n shipping_price = apply_discount_to_value(\n value=order_discount.value,\n value_type=order_discount.value_type,\n currency=currency,\n price_to_discount=shipping_price,\n )\n else:\n temporary_undiscounted_total = subtotal + shipping_price\n if temporary_undiscounted_total.amount > 0:\n temporary_total = apply_discount_to_value(\n value=order_discount.value,\n value_type=order_discount.value_type,\n currency=currency,\n price_to_discount=temporary_undiscounted_total,\n )\n total_discount = temporary_undiscounted_total - temporary_total\n 
subtotal_discount = (\n subtotal / temporary_undiscounted_total\n ) * total_discount\n shipping_discount = total_discount - subtotal_discount\n\n subtotal -= subtotal_discount\n shipping_price -= shipping_discount\n shipping_discount_amount = shipping_price_before_discount - shipping_price\n subtotal_discount_amount = subtotal_before_discount - subtotal\n total_discount_amount = shipping_discount_amount + subtotal_discount_amount\n if order_discount.amount != total_discount_amount:\n order_discount.amount = total_discount_amount\n order_discounts_to_update.append(order_discount)\n if order_discounts_to_update:\n OrderDiscount.objects.bulk_update(order_discounts_to_update, [\"amount_value\"])\n return max(subtotal + shipping_price, zero_money(currency))", "def cmd_set_margins(self):\n logger.debug(\"--> cmd_set_margins\")", "def compute_sale_price(self, perc_margin=0.0):\n new_sale_price = self.standard_price * ((100 + perc_margin) / 100)\n return new_sale_price", "def lineitem_price(self):\n price = Decimal(\"0.00\")\n for li in self.lineitems.all():\n price += li.total\n return price", "def calculate_total_price(prices, discount):\n \n sum_prices = 0\n\n for price in prices:\n dis = discount/100\n pricedis = price - price * dis\n print(pricedis)\n sum_prices = sum_prices + pricedis\n print(sum)\n return math.floor(sum_prices)", "def calcularSubtotal(self):", "def _compute_amount(self):\n for line in self:\n price = line.unit_price * (1 - 0.0 / 100.0)\n taxes = line.tax_id.compute_all(\n price, line.sale_order_id.currency_id, line.qty,\n product=line.product_id,\n partner=line.sale_order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get(\n 'taxes', [])),\n 'value': taxes['total_included'],\n 'value_before_tax': line.unit_price * line.qty\n })", "def _get_invoice_qty(self):\n for line in self:\n qty_invoiced = 0.0\n poin_invoiced = 0.0\n poib_invoiced = 0.0\n nbpieces_invoiced = 0.0\n nbcolis_invoiced = 0.0\n nbpal_invoiced = 0.0\n for invoice_line in line.invoice_lines:\n if invoice_line.invoice_id.state != 'cancel':\n if invoice_line.invoice_id.type == 'out_invoice':\n qty_invoiced += invoice_line.di_qte_un_saisie\n poin_invoiced += invoice_line.di_poin\n poib_invoiced += invoice_line.di_poib\n nbpieces_invoiced += invoice_line.di_nb_pieces\n nbcolis_invoiced += invoice_line.di_nb_colis\n nbpal_invoiced += invoice_line.di_nb_palette\n elif invoice_line.invoice_id.type == 'out_refund':\n qty_invoiced -= invoice_line.di_qte_un_saisie\n poin_invoiced -= invoice_line.di_poin\n poib_invoiced -= invoice_line.di_poib\n nbpieces_invoiced -= invoice_line.di_nb_pieces\n nbcolis_invoiced -= invoice_line.di_nb_colis\n nbpal_invoiced -= invoice_line.di_nb_palette\n line.di_qte_un_saisie_fac = qty_invoiced\n line.di_poin_fac = poin_invoiced\n line.di_poib_fac = poib_invoiced\n line.di_nb_pieces_fac = nbpieces_invoiced\n line.di_nb_colis_fac = nbcolis_invoiced\n line.di_nb_palette_fac = nbpal_invoiced\n super(SaleOrderLine, self)._get_invoice_qty()", "def margin(president, accident): \r\n return sum([Ptraffic[(p,a)]*Ppresident[p]*Paccident[a] \r\n for p in president\r\n for a in accident])", "def subtotal(self):\r\n return self.cantidad * self.precio", "def total(anItem):\r\n\r\n if anItem.price <= 0:\r\n raise ValueError(\"total does not compute prices at or below 0 cent\")\r\n\r\n if anItem.necessary:\r\n tax = anItem.price * 0.01\r\n else:\r\n tax = anItem.price * 0.09\r\n return anItem.price + tax", "def _get_to_invoice_qty(self):\n for line in 
self:\n if line.order_id.state in ['sale', 'done']:\n if line.product_id.invoice_policy == 'order':\n line.di_qte_a_facturer_un_saisie = line.di_qte_un_saisie - line.di_qte_un_saisie_fac\n line.di_poin_a_facturer = line.di_poin - line.di_poin_fac\n line.di_poib_a_facturer = line.di_poib - line.di_poib_fac\n line.di_nb_pieces_a_facturer = line.di_nb_pieces - line.di_nb_pieces_fac\n line.di_nb_colis_a_facturer = line.di_nb_colis - line.di_nb_colis_fac\n line.di_nb_palette_a_facturer = line.di_nb_palette - line.di_nb_palette_fac\n else:\n line.di_qte_a_facturer_un_saisie = line.di_qte_un_saisie_liv - line.di_qte_un_saisie_fac\n line.di_poin_a_facturer = line.di_poin_liv - line.di_poin_fac\n line.di_poib_a_facturer = line.di_poib_liv - line.di_poib_fac\n line.di_nb_pieces_a_facturer = line.di_nb_pieces_liv - line.di_nb_pieces_fac\n line.di_nb_colis_a_facturer = line.di_nb_colis_liv - line.di_nb_colis_fac\n line.di_nb_palette_a_facturer = line.di_nb_palette_liv - line.di_nb_palette_fac\n else:\n line.di_qte_a_facturer_un_saisie = 0.0\n line.di_poin_a_facturer = 0.0\n line.di_poib_a_facturer = 0.0\n line.di_nb_pieces_a_facturer = 0\n line.di_nb_colis_a_facturer = 0\n line.di_nb_palette_a_facturer = 0.0 \n super(SaleOrderLine, self)._get_to_invoice_qty()", "def profit(Qte,pv):\n return Qte*(pv-coutMenu) - coutEntretien", "def save(self, *args, **kwargs):\n self.lineitem_total = self.comp.price * self.quantity\n super().save(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Generate html or LaTex tables provided (lists of) of models. The function can create publication quality tables in various formats from statsmodels or estimagic results. It allows for extensive customization via optional arguments and almost limitless flexibility when using a twostage approach where the ``return_type`` is set to ``"render_inputs"``, the resulting dictionary representation of the table is modified and that modified version is then passed to ``render_latex`` or ``render_html``. The formatting of the numbers in the table is completely configurable via the ``number_format`` argument. By default we round to three significant digits (i.e. the three leftmost nonzero digits are displayed). This is very different from other table packages and motivated by the fact that most estimation tables give a wrong feeling of precision by showing too many decimal points.
def estimation_table( models, *, return_type="dataframe", render_options=None, show_col_names=True, show_col_groups=None, show_index_names=False, show_inference=True, show_stars=True, show_footer=True, custom_param_names=None, custom_col_names=None, custom_col_groups=None, custom_index_names=None, custom_notes=None, confidence_intervals=False, significance_levels=(0.1, 0.05, 0.01), append_notes=True, notes_label="Note:", stats_options=None, number_format=("{0:.3g}", "{0:.5f}", "{0:.4g}"), add_trailing_zeros=True, escape_special_characters=True, siunitx_warning=True, ): if not isinstance(models, (tuple, list)): raise TypeError(f"models must be a list or tuple. Not: {type(models)}") models = [_process_model(model) for model in models] model_names = _get_model_names(models) default_col_names, default_col_groups = _get_default_column_names_and_groups( model_names ) column_groups = _customize_col_groups( default_col_groups=default_col_groups, custom_col_groups=custom_col_groups ) column_names = _customize_col_names( default_col_names=default_col_names, custom_col_names=custom_col_names ) show_col_groups = _update_show_col_groups(show_col_groups, column_groups) stats_options = _set_default_stats_options(stats_options) body, footer = _get_estimation_table_body_and_footer( models, column_names, column_groups, custom_param_names, custom_index_names, significance_levels, stats_options, show_col_names, show_col_groups, show_stars, show_inference, confidence_intervals, number_format, add_trailing_zeros, ) render_inputs = { "body": body, "footer": footer, "render_options": render_options, } if return_type == "render_inputs": out = render_inputs elif str(return_type).endswith("tex"): out = render_latex( **render_inputs, show_footer=show_footer, append_notes=append_notes, notes_label=notes_label, significance_levels=significance_levels, custom_notes=custom_notes, siunitx_warning=siunitx_warning, show_index_names=show_index_names, show_col_names=show_col_names, escape_special_characters=escape_special_characters, ) elif str(return_type).endswith("html"): out = render_html( **render_inputs, show_footer=show_footer, append_notes=append_notes, notes_label=notes_label, custom_notes=custom_notes, significance_levels=significance_levels, show_index_names=show_index_names, show_col_names=show_col_names, escape_special_characters=escape_special_characters, ) elif return_type == "dataframe": if show_footer: footer.index.names = body.index.names out = pd.concat([body.reset_index(), footer.reset_index()]).set_index( body.index.names ) else: out = body else: raise ValueError( f"""Value of return type can be either of ['data_frame', 'render_inputs','latex' ,'html'] or a path ending with '.html' or '.tex'. Not: {return_type}.""" ) return_type = Path(return_type) if return_type.suffix not in (".html", ".tex"): return out else: return_type.write_text(out)
[ "def show_table(models: typing.List[Model]):\n if not models:\n click.echo(\"Empty!\")\n return\n\n headers = list(flatten_dict(models[0].to_dict()).keys())\n table = Texttable(MAX_TABLE_WIDTH)\n\n table.add_rows([headers] + [_convert_model_values(md) for md in models])\n click.echo(table.draw() + \"\\n\")", "def format_table(filename, savefilename, model=False, bracket=[\"(\", \")\"], LaTeX=True):\n\tif type(filename) == str:\n\t\tm = dataobj(filename)\n\telif type(filename).__name__ != \"dataobj\":\n\t\tprint(\"File {0} is not being supported.\".format(filename))\n\tN,nvar = m.data.shape\n\tnmodel = N/2 \n\tcoef = m.data[0:nmodel]\n\tstd = m.data[nmodel:N] \n\tcoefs = [[\"{0:.3f}\".format(i) for i in l] for l in coef.transpose()]\n\tstds = [[\"{0:.3f}\".format(i) for i in l] for l in std.transpose()]\n\ts = \"\"\n\tif LaTeX==False:\n\t\tif model != False:\n\t\t\ts = \"&\" + \"&\".join(model)+\" \\n\"\n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\n\"\n\telse: \n\t\tfor i in range(0,nvar):\n\t\t\ts = s+m.date[i]+\"&\"+\"&\".join([j for j in coefs[i]]) + \"\\\\\\\\\\n\" + \"&\"+\"&\".join([bracket[0]+k+bracket[1] for k in stds[i]])+\"\\\\\\\\\\n\"\n\t\theader = \"\\\\begin{tabular}{c|\"\n\t\tfor i in range(0,nmodel):\n\t\t\theader = header + \"c\"\n\t\theader= header + \"|} \\n \\\\hline\"\n\t\tif model != False:\n\t\t\theader = header + \"&\" + \"&\".join(model)+\"\\\\\\\\ \\\\hline \\n\"\n\t\tend = \"\\hline\\\\end{tabular}\"\n\t\ts = header + s + end\n\tf = open(savefilename, \"w\")\n\tf.write(s)\n\tf.close()", "def html_table(matrix_or_array_like,\n float_fmt=None,\n raw=False,\n first_row_headers=False,\n caption=None,\n style=None,\n formatter=None):\n\n raw_table = matrix_or_array_like\n if not float_fmt:\n float_fmt = '%.2f'\n\n if not formatter:\n formatter = formatter_factory(default_fmt=float_fmt,\n outlier_fmt=float_fmt)\n\n if 'sympy.matrices' in str(type(matrix_or_array_like)):\n raw_table = array(raw_table)\n if style:\n html_table = ['<table style=\"%s\">' % style]\n else:\n html_table = ['<table>']\n if caption:\n html_table.append('<caption>%s</caption>' % caption)\n row_count = 0\n for row in raw_table:\n html_table.append('<tr>')\n for col in row:\n to_append = formatter(col)\n\n if first_row_headers and row_count == 0:\n html_table.append('<th>{0}</th>'.format(to_append))\n else:\n html_table.append('<td>{0}</td>'.format(to_append))\n\n html_table.append('</tr>')\n row_count += 1\n html_table.append('</table>')\n if raw:\n return ''.join(html_table)\n else:\n return HTML(''.join(html_table))", "def to_html(self):\n # create table 1\n body = \"\"\"<html>\n <head>\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n td, th {\n border: 1px solid #dddddd;\n text-align: left;\n padding: 8px;\n }\n\n </style>\n </head>\n <body>\n\n <h2>transcription-compare Table</h2>\n <table>\n <tr>\n <th>output_name</th>\n <th>distance</th>\n <th>error_rate</th>\n <th>substitution</th>\n <th>insertion</th>\n <th>deletion</th>\n </tr>\n <tbody>\n \"\"\"\n for index, identifier in enumerate(self.identifiers):\n body += \"\"\"<tr><td>{}</td>\"\"\".format(identifier)\n body += '\\n<td>' + str(self.distance[index]) + '</td>'\n body += '\\n<td>' + str(self.error_rate[index]) + '</td>'\n body += '\\n<td>' + str(self.substitution[index]) + '</td>'\n body += '\\n<td>' + str(self.insertion[index]) + '</td>'\n body += 
'\\n<td>' + str(self.deletion[index]) + '</td>\\n</tr>'\n body += \"\"\"</tbody>\n </table>\n \"\"\"\n body += \"\"\"<table>\\n<tr>\\n<th>error_type</th>\"\"\"\n for index, identifier in enumerate(self.identifiers):\n body += \"\"\" <th>{}</th>\"\"\".format(identifier)\n body += \"\"\"<th>percentage</th>\"\"\"\n body += \"\"\"</tr>\"\"\"\n body += self.multi_alignment_result.to_html_error_type(self.total_rows)\n body += \"\"\"</tbody>\n </table>\n \"\"\"\n\n body += self.multi_alignment_result.to_html()\n body += '\\n</body>\\n</html>'\n return body", "def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot", "def gen_table_md(models, section_header, table_header_list, out_txt_fname):\n\n # conditional delete/open markdown file\n if os.path.exists(out_txt_fname):\n os.remove(out_txt_fname)\n out = open(out_txt_fname, 'wb')\n\n # write section header markdown\n section_header = '## ' + section_header\n out.write(section_header + '\\n')\n\n # write table header markdown\n num_table_attrs = len(table_header_list)\n out.write(' | '.join(table_header_list) + '\\n')\n out.write(' | '.join(['---' for _ in range(0, num_table_attrs)]) + '\\n')\n\n # write model attributes\n for model in models:\n out.write(' | '.join([str(attr) for attr in model]))\n\n out.close()", "def to_html(self, products_data, detail_data):\n table_string = ''\n\n keys = Product().to_dict().keys()\n table_string += '<tr>' + ''.join(['<th>%s</th>' % key for key in keys]) + '</tr>\\n'\n\n for product in products_data:\n values = product.to_dict().values()\n table_string += '<tr>' + ''.join(['<td>%s</td>' % value for value in values]) + '</tr>\\n'\n\n table_string = '<table>\\n%s</table>\\n' % table_string\n\n return table_string", "def gen_table_md(models, section_header, out_txt_fname, write_mode='a+'):\n\n # conditional delete/open markdown file\n out = open(out_txt_fname, write_mode)\n\n # write section header markdown\n section_header = '## ' + section_header\n out.write(section_header + '\\n')\n\n # write table header markdown\n num_table_attrs = len(models.columns)\n out.write(' | '.join(models.columns) + '\\n')\n out.write(' | '.join(['---' for _ in range(0, num_table_attrs)]) + '\\n')\n\n # write model attributes\n for i in models.index:\n out.write(' | '.join([str(j) for j in list(models.loc[i,\\\n models.columns])\\\n ]) + '\\n')\n\n out.write('\\n')\n out.close()", "def to_html_table(\r\n dictset: Iterator[dict],\r\n limit: int = 5): \r\n def _to_html_table(data, limit):\r\n \r\n first_row = True\r\n highlight = False\r\n \r\n yield '<table class=\"table table-sm\">'\r\n for counter, record in 
enumerate(data):\r\n \r\n if first_row:\r\n yield '<thead class=\"thead-light\"><tr>'\r\n for key, value in record.items():\r\n yield '<th>' + key + '<th>\\n'\r\n yield '</tr></thead><tbody>'\r\n first_row = False\r\n \r\n if counter >= limit:\r\n break\r\n \r\n if highlight:\r\n yield '<tr style=\"background-color:#F4F4F4\">'\r\n else:\r\n yield '<tr>'\r\n highlight = not highlight\r\n for key, value in record.items():\r\n yield '<td>' + str(value) + '<td>\\n'\r\n yield '</tr>'\r\n \r\n yield '</tbody></table>'\r\n \r\n import types\r\n if isinstance(data, types.GeneratorType):\r\n yield f'<p>unknown rows x {len(record.items())} columns</p>'\r\n yield 'NOTE: the displayed records have been spent'\r\n if isinstance(data, list):\r\n yield f'<p>{len(data)} rows x {len(record.items())} columns</p>'\r\n\r\n return ''.join(_to_html_table(dictset, limit))", "def table():\n return render_template('table.html')", "def to_latex_table(self, experiment, **kwargs):\n\n if 'caption' not in kwargs or kwargs['caption'] is None:\n caption_text = \"\\\\caption{Parameters for Axelrod Simulations for Experiment Name: \"\n caption_text += experiment\n caption_text += '}\\n'\n else:\n caption_text = '\\\\caption{'\n caption_text += kwargs['caption']\n caption_text += '}\\n'\n\n\n t = []\n t.append('\\\\begin{table}[h]\\n')\n t.append('\\\\begin{tabular}{|p{0.6\\\\textwidth}|p{0.4\\\\textwidth}|}\\n')\n t.append('\\\\hline\\n')\n t.append('\\\\textbf{Simulation Parameter} & \\\\textbf{Value or Values} \\\\\\\\ \\n')\n t.append('\\\\hline\\n')\n\n for var in self._get_public_variables():\n s = self.parameter_labels[var[0]]\n s += ' & '\n\n\n # need to know if var[1] is a single integer, or a list\n if hasattr(var[1], '__iter__'):\n s += ', '.join(map(str, var[1]))\n else:\n s += str(var[1])\n\n s += '\\\\\\\\ \\n'\n t.append(s)\n\n\n t.append('\\\\hline\\n')\n t.append('\\\\end{tabular}\\n')\n t.append(caption_text)\n t.append('\\\\label{tab:ctpy-sim-parameters}\\n')\n t.append('\\\\end{table}\\n')\n\n return ''.join(t)", "def create_latex_tables_for_model(self, config, model_name, dir):\r\n config = Configuration(config)\r\n model_system = ModelSystem()\r\n input_db, output_db = model_system._get_database_connections(config)\r\n sql_storage = StorageFactory().get_storage('sql_storage', storage_location=input_db)\r\n #TODO: only do the next stuff if this model has coefficients\r\n if 'controller' not in config['models_configuration'][model_name]:\r\n return\r\n if 'prepare_for_run' not in config['models_configuration'][model_name]['controller']:\r\n return\r\n if 'coefficients' not in config['models_configuration'][model_name]['controller']['prepare_for_run']['output']:\r\n return\r\n specification_table_name = config['models_configuration'][model_name].get('specification_table', None)\r\n coefficents_table_name = config['models_configuration'][model_name].get('coefficients_table', None)\r\n (specification, coefficients) = prepare_specification_and_coefficients(\r\n specification_storage=sql_storage,\r\n specification_table=specification_table_name,\r\n coefficients_storage=sql_storage,\r\n coefficients_table=coefficents_table_name)\r\n\r\n self.create_latex_table_for_coefficients_for_model(coefficients, model_name, dir)\r\n self.create_latex_table_for_specifications_for_model(specification, model_name, dir)", "def __export_to_latex(self):\n\n self.__create_export_dir()\n\n metrics = [\"Best\", \"Median\", \"Worst\", \"Mean\", \"Std.\"]\n\n def only_upper(s):\n return \"\".join(c for c in s if c.isupper())\n\n 
with open(self.__generate_export_name(\"tex\"), \"a\") as outFile:\n outFile.write(\"\\\\documentclass{article}\\n\")\n outFile.write(\"\\\\usepackage[utf8]{inputenc}\\n\")\n outFile.write(\"\\\\usepackage{siunitx}\\n\")\n outFile.write(\"\\\\sisetup{\\n\")\n outFile.write(\"round-mode=places,round-precision=3}\\n\")\n outFile.write(\"\\\\begin{document}\\n\")\n outFile.write(\"\\\\begin{table}[h]\\n\")\n outFile.write(\"\\\\centering\\n\")\n begin_tabular = \"\\\\begin{tabular}{cc\"\n for alg in self.results:\n for _i in range(len(self.results[alg])):\n begin_tabular += \"S\"\n firstLine = \" &\"\n for benchmark in self.results[alg].keys():\n firstLine += \" & \\\\multicolumn{1}{c}{\\\\textbf{\" + benchmark + \"}}\"\n firstLine += \" \\\\\\\\\"\n break\n begin_tabular += \"}\\n\"\n outFile.write(begin_tabular)\n outFile.write(\"\\\\hline\\n\")\n outFile.write(firstLine + \"\\n\")\n outFile.write(\"\\\\hline\\n\")\n for alg in self.results:\n for metric in metrics:\n line = \"\"\n if metric != \"Worst\":\n line += \" & \" + metric\n else:\n shortAlg = \"\"\n if alg.endswith(\"Algorithm\"):\n shortAlg = only_upper(alg[:-9])\n else:\n shortAlg = only_upper(alg)\n line += \"\\\\textbf{\" + shortAlg + \"} & \" + metric\n for benchmark in self.results[alg]:\n if metric == \"Best\":\n line += \" & \" + str(amin(self.results[alg][benchmark]))\n elif metric == \"Median\":\n line += \" & \" + str(median(self.results[alg][benchmark]))\n elif metric == \"Worst\":\n line += \" & \" + str(amax(self.results[alg][benchmark]))\n elif metric == \"Mean\":\n line += \" & \" + str(mean(self.results[alg][benchmark]))\n else:\n line += \" & \" + str(std(self.results[alg][benchmark]))\n line += \" \\\\\\\\\"\n outFile.write(line + \"\\n\")\n outFile.write(\"\\\\hline\\n\")\n outFile.write(\"\\\\end{tabular}\\n\")\n outFile.write(\"\\\\end{table}\\n\")\n outFile.write(\"\\\\end{document}\")\n logger.info(\"Export to Latex completed!\")", "def _build_estimation_table_footer(\n models,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n):\n to_concat = [\n _create_statistics_sr(\n mod,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n )\n for mod in models\n ]\n stats = pd.concat(to_concat, axis=1)\n return stats", "def gen_html(*args):\n return gen_html.gen_html(gen_html.transpose(TEST_DATA), *args)[0]", "def to_html(self,fn='tableone.html'):\n tablefmt = 'html'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def HTMLSummary():\n Output = open('Results.html', 'w')\n Output.write( \"<html><head><title>Summary</title></head>\\n\")\n Query = \"SELECT * FROM experiments ORDER BY experiment\"\n \n cur.execute(Query)\n AllExperiments = cur.fetchall()\n \n for Table, TimeField, Fields in TableDefs:\n print Table\n Query = ReturnQuery(Table, TimeField, Fields)\n cur.execute(Query)\n \n GasResults = cur.fetchall()\n AppendToMasterTable(AllExperiments, GasResults)\n\n cur.execute(\"SELECT MAX(experiment) FROM experiments\")\n MaxExperiment = cur.fetchone()\n AppendToMasterTable(AllExperiments,GetGasVolume(range(1,int(MaxExperiment[0])+1,1)))\n \n Output.write(\"<table border=\\\"1\\\">\\n\")\n #Need to generate table headers here\n Query = \"select column_name from information_schema.columns where table_name='experiments';\"\n cur.execute(Query)\n Rows = cur.fetchall()\n \n Output.write(\"\\t<tr>\\n\")\n for Row in Rows:\n 
Output.write(\"\\t\\t<th>{}</th>\\n\".format(Row[0]))\n \n for Table, TimeField, Fields in TableDefs:\n for Field in Fields:\n Output.write(\"\\t\\t<th>{}</th>\\n\".format(Field))\n Output.write(\"\\t\\t<th>Gas Volume</th>\\n\\t</tr>\\n\")\n \n #Write out all data\n for ExperimentRow in AllExperiments:\n Output.write( \"\\t<tr>\\n\")\n for ExpVal in ExperimentRow:\n Output.write( \"\\t\\t<td>{}</td>\\n\".format(ExpVal))\n Output.write(\"\\t</tr>\\n\")\n Output.write( \"</table>\")\n Output.write( \"</body>\\n</html>\")", "def gen_table(output_list):\n s = []\n for i in range(len(output_list)):\n try:\n # s2 =\"<div> hELLO </div>\"\n obj = output_list[i]\n # print \"object\", obj\n s2 = \"<div class='display_res'>\"\n s2 += \"<div class='sub'><img class='image-circle' src='\" + obj['image_url'] + \"' height=100 width=100></img>\" + \"</div>\"\n s2 += \"<div class='sub'><h4><a href='\" + obj['url'] + \"'>\" + obj['name'] + \"</a></h4></div>\"\n s2 += \"<div class='sub'>\" + \"Price: \" + obj['price'] + \"</div>\"\n s2 += \"<div class='sub'>\" + \"Review count: \" + str(obj['review_count']) + \"</div>\"\n s2 += \"<div class='sub'>\" + \" Similar aspects: \" + string_words(obj['categories']) + \"</div>\"\n s2 += \"<div class='sub'>\" + str(obj['rating']) + \" stars</div>\"\n s2 += \"<div class='sub'>\" + string_obj(obj['location']['display_address']) + \"</div>\"\n s2 += \"<div class='sub'>\" + \"Phone: \" + obj['display_phone'] + \"</div>\"\n s2 += \"</div><br> <br>\"\n s2 = unidecode(s2)\n s.append(format_html(\"{}\", mark_safe(s2)))\n except:\n pass\n return s", "def tableToHTML( self ):\n output = ''\n output += '<div class=\"myTable\">'\n\n output += '<div class=\"myTableHeader\">'\n output += '<ul>'\n for col in self.columns:\n output += '<li>%s</li>' % col\n output += '</ul>'\n output += '</div>'\n\n for row in range( 0, len( self.tableData ) ):\n output += '<div class=\"myTableRow%d\">' % ( ( row % 2 ) + 1 )\n output += '<ul>'\n for col in self.tableData[row]:\n output += '<li>%s</li>' % col\n output += '</ul>'\n output += '</div>'\n\n output += '</div>'\n\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create body and footer blocks with significance stars and inference values. Applies number formatting to parameters and summary statistics. Concatenates inference values to parameter values if applicable, and adds significance stars if applicable.
def _get_estimation_table_body_and_footer( models, column_names, column_groups, custom_param_names, custom_index_names, significance_levels, stats_options, show_col_names, show_col_groups, show_stars, show_inference, confidence_intervals, number_format, add_trailing_zeros, ): body, max_trail = _build_estimation_table_body( models, column_names, column_groups, custom_param_names, custom_index_names, show_col_names, show_col_groups, show_inference, show_stars, confidence_intervals, significance_levels, number_format, add_trailing_zeros, ) footer = _build_estimation_table_footer( models, stats_options, significance_levels, show_stars, number_format, add_trailing_zeros, max_trail, ) footer.columns = body.columns return body, footer
[ "def _build_estimation_table_footer(\n models,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n):\n to_concat = [\n _create_statistics_sr(\n mod,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n )\n for mod in models\n ]\n stats = pd.concat(to_concat, axis=1)\n return stats", "def generate_footer_html(self):\n footer = '<td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n\n if not self.show_footer:\n return footer\n footer += self.generate_observations_html()\n footer += self.generate_r2_html()\n footer += self.generate_r2_adj_html()\n if self.show_residual_std_err:\n footer += self.generate_resid_std_err_html()\n if self.show_f_statistic:\n footer += self.generate_f_statistic_html()\n footer += '<tr><td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n footer += self.generate_notes_html()\n footer += '</table>'\n\n return footer", "def apbsWriteCalculationParams(self, fp, molname):\n if(self.pbeType=='Linearized'):\n fp.write('\\tlpbe\\n')\n else:\n fp.write('\\tnpbe\\n')\n \n if(self.boundaryConditions=='Zero E'):\n fp.write('\\tbcfl zero\\n')\n elif(self.boundaryConditions=='Single Debye-Huckel'):\n fp.write('\\tbcfl sdh\\n')\n else: fp.write('\\tbcfl mdh\\n')\n\n if(self.chargeDiscretization=='Trilinear hat-function'):\n fp.write('\\tchgm spl0\\n')\n elif self.chargeDiscretization == 'Cubic B-spline':\n fp.write('\\tchgm spl2\\n')\n else:\n fp.write('\\tchgm spl4\\n')\n \n \n if(self.surfaceCalculation=='No smoothing'):\n fp.write('\\tsrfm mol\\n')\n fp.write('\\tsdens %.3f\\n'%(self.sdens))\n elif(self.surfaceCalculation=='Harmonic Average'):\n fp.write('\\tsrfm smol\\n')\n fp.write('\\tsdens %.3f\\n'%(self.sdens))\n elif self.surfaceCalculation == 'Cubic B-spline':\n fp.write('\\tsrfm spl2\\n')\n fp.write('\\tswin %.3f\\n'%(self.splineWindow))\n else:\n fp.write('\\tsrfm spl4\\n')\n fp.write('\\tswin %.3f\\n'%(self.splineWindow))\n \n \n if(self.energyOutput==''):\n fp.write('\\tcalcenergy no\\n')\n elif(self.energyOutput=='Total'):\n fp.write('\\tcalcenergy total\\n')\n else: fp.write('\\tcalcenergy comps\\n')\n\n if(self.forceOutput==''):\n fp.write('\\tcalcforce no\\n')\n elif(self.forceOutput=='Total'):\n fp.write('\\tcalcforce total\\n')\n else: fp.write('\\tcalcforce comps\\n')\n\n tempFileString = molname + '.chargeDistribution'\n if (self.chargeDistributionFile=='OpenDX'): \n fp.write('\\twrite charge dx %s\\n' % tempFileString)\n elif(self.chargeDistributionFile=='AVS UCD'):\n fp.write('\\twrite charge avs %s\\n' % tempFileString)\n elif(self.chargeDistributionFile=='UHBD'):\n fp.write('\\twrite charge uhbd %s\\n'%tempFileString)\n\n tempFileString = molname +'.potential'\n if (self.potentialFile=='OpenDX'):\n fp.write('\\twrite pot dx %s\\n' % tempFileString)\n elif(self.potentialFile=='AVS UCD'):\n fp.write('\\twrite pot avs %s\\n' % tempFileString)\n elif(self.potentialFile=='UHBD'):\n fp.write('\\twrite pot uhbd %s\\n'%tempFileString)\n\n tempFileString = molname + '.solventAccessibility'\n if (self.solventAccessibilityFile=='OpenDX'):\n fp.write('\\twrite smol dx %s\\n' % tempFileString)\n elif(self.solventAccessibilityFile=='AVS UCD'):\n fp.write('\\twrite smol avs %s\\n' % tempFileString)\n elif(self.solventAccessibilityFile=='UHBD'):\n fp.write('\\twrite smol uhbd %s\\n'%tempFileString)\n\n tempFileString = molname + '.splineBasedAccessibility'\n if 
(self.splineBasedAccessibilityFile=='OpenDX'):\n fp.write('\\twrite sspl dx %s\\n' % tempFileString)\n elif(self.splineBasedAccessibilityFile=='AVS UCD'):\n fp.write('\\twrite sspl avs %s\\n' % tempFileString)\n elif(self.splineBasedAccessibilityFile=='UHBD'):\n fp.write('\\twrite sspl uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.VDWAccessibility'\n if (self.VDWAccessibilityFile=='OpenDX'): \n fp.write('\\twrite vdw dx %s\\n' % tempFileString)\n elif(self.VDWAccessibilityFile=='AVS UCD'):\n fp.write('\\twrite vdw avs %s\\n' % tempFileString)\n elif(self.VDWAccessibilityFile=='UHBD'):\n fp.write('\\twrite vdw uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.ionAccessibility'\n if (self.ionAccessibilityFile=='OpenDX'):\n fp.write('\\twrite ivdw dx %s\\n' % tempFileString)\n elif(self.ionAccessibilityFile=='AVS UCD'):\n fp.write('\\twrite ivdw avs %s\\n' % tempFileString)\n elif(self.ionAccessibilityFile=='UHBD'):\n fp.write('\\twrite ivdw uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.laplacianOfPotential'\n if (self.laplacianOfPotentialFile=='OpenDX'):\n fp.write('\\twrite lap dx %s\\n' % tempFileString)\n elif(self.laplacianOfPotentialFile=='AVS UCD'):\n fp.write('\\twrite lap avs %s\\n' % tempFileString)\n elif(self.laplacianOfPotentialFile=='UHBD'):\n fp.write('\\twrite lap uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.energyDensity'\n if (self.energyDensityFile=='OpenDX'): \n fp.write('\\twrite edens dx %s\\n' % tempFileString)\n elif(self.energyDensityFile=='AVS UCD'): \n fp.write('\\twrite edens avs %s\\n' % tempFileString)\n elif(self.energyDensityFile=='UHBD'):\n fp.write('\\twrite edens uhbd %s\\n'% tempFileString)\n\n tempFileString = molname +'.ionNumber'\n if (self.ionNumberFile=='OpenDX'):\n fp.write('\\twrite ndens dx %s\\n' % tempFileString)\n elif(self.ionNumberFile=='AVS UCD'): \n fp.write('\\twrite ndens avs %s\\n' % tempFileString)\n elif(self.ionNumberFile=='UHBD'): \n fp.write('\\twrite ndens uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.ionChargeDensity'\n if (self.ionChargeDensityFile=='OpenDX'):\n fp.write('\\twrite qdens dx %s\\n' % tempFileString)\n elif(self.ionChargeDensityFile=='AVS UCD'):\n fp.write('\\twrite qdens avs %s\\n' % tempFileString)\n elif(self.ionChargeDensityFile=='UHBD'):\n fp.write('\\twrite qdens uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.xShiftedDielectric'\n if (self.xShiftedDielectricFile=='OpenDX'):\n fp.write('\\twrite dielx dx %s\\n' % tempFileString)\n elif(self.xShiftedDielectricFile=='AVS UCD'):\n fp.write('\\twrite dielx avs %s\\n' % tempFileString)\n elif(self.xShiftedDielectricFile=='UHBD'):\n fp.write('\\twrite dielx uhbd %s\\n'% tempFileString)\n\n tempFileString = molname + '.yShiftedDielectric'\n if (self.yShiftedDielectricFile=='OpenDX'):\n fp.write('\\twrite diely dx %s\\n' % tempFileString)\n elif(self.yShiftedDielectricFile=='AVS UCD'):\n fp.write('\\twrite diely avs %s\\n' % tempFileString)\n elif(self.yShiftedDielectricFile=='UHBD'):\n fp.write('\\twrite diely uhbd %s\\n'%tempFileString)\n\n tempFileString = molname + '.zShiftedDielectric'\n if (self.zShiftedDielectricFile=='OpenDX'): \n fp.write('\\twrite dielz dx %s\\n' % tempFileString)\n elif(self.zShiftedDielectricFile=='AVS UCD'):\n fp.write('\\twrite dielz avs %s\\n' % tempFileString)\n elif(self.zShiftedDielectricFile=='UHBD'):\n fp.write('\\twrite dielz uhbd %s\\n'%tempFileString)\n\n tempFileString = molname + '.kappaFunction'\n if (self.kappaFunctionFile=='OpenDX'):\n 
fp.write('\\twrite kappa dx %s\\n' % tempFileString)\n elif(self.kappaFunctionFile=='AVS UCD'):\n fp.write('\\twrite kappa avs %s\\n' % tempFileString)\n elif(self.kappaFunctionFile=='UHBD'):\n fp.write('\\twrite kappa uhbd %s\\n'%tempFileString)\n fp.write('\\n')", "def WriteFooter(self):\n pass", "def GenerateStartBody(self, decorations=1):\n\n return \"<body>\"", "def printConstants(self):\n print('h = {:.14e} Js'.format(const.h))\n print('c = {:.14e} m/s'.format(const.c))\n print('k = {:.14e} J/K'.format(const.k))\n print('q = {:.14e} C'.format(const.e))\n\n print(' ')\n print('pi = {:.14e}'.format(const.pi))\n print('e = {:.14e}'.format(np.exp(1)))\n print('zeta(3) = {:.14e}'.format(self.zeta3 ))\n print('a2 = {:.14e}, root of 2(1-exp(-x))-x'.format(self.a2 ))\n print('a3 = {:.14e}, root of 3(1-exp(-x))-x'.format(self.a3 ))\n print('a4 = {:.14e}, root of 4(1-exp(-x))-x'.format(self.a4 ))\n print('a5 = {:.14e}, root of 5(1-exp(-x))-x'.format(self.a5 ))\n\n print(' ')\n print('sigmae = {:.14e} W/(m^2 K^4)'.format(self.sigmae))\n print('sigmaq = {:.14e} q/(s m^2 K^3)'.format(self.sigmaq))\n print(' ')\n print('c1em = {:.14e} with wavelenth in m'.format(self.c1em))\n print('c1qm = {:.14e} with wavelenth in m'.format(self.c1qm))\n print('c2m = {:.14e} with wavelenth in m'.format(self.c2m))\n print(' ')\n print('c1el = {:.14e} with wavelenth in $\\mu$m'.format(self.c1el))\n print('c1ql = {:.14e} with wavelenth in $\\mu$m'.format(self.c1ql))\n print('c2l = {:.14e} with wavelenth in $\\mu$m'.format(self.c2l))\n print(' ')\n print('c1en = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c1en))\n print('c1qn = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c1qn))\n print('c2n = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c2n))\n print(' ')\n print('c1ef = {:.14e} with frequency in Hz'.format(self.c1ef))\n print('c1nf = {:.14e} with frequency in Hz'.format(self.c1nf))\n print('c2f = {:.14e} with frequency in Hz'.format(self.c2f))\n print(' ')\n print('wel = {:.14e} um.K Wien for radiant and wavelength'.format(self.wel))\n print('wql = {:.14e} um.K Wien for photon rate and wavelength'.format(self.wql))\n print('wen = {:.14e} cm-1/K Wien for radiant and wavenumber'.format(self.wen))\n print('wqn = {:.14e} cm-1/K Wien for photon rate and wavenumber'.format(self.wqn))\n print('wef = {:.14e} Hz/K Wien for radiant and frequency'.format(self.wef))\n print('wqf = {:.14e} Hz/K Wien for photon rate and frequency'.format(self.wqf))\n print(' ')", "def print_summary_data(resids, res_freq, out):\n\n no_res = sct.seq.sum_res_no(resids, res_freq)\n\n if no_res == 0:\n out.write(\"No such residues in the provided input\\n\")\n return\n\n # compute total mass and D/H scattering lengths\n mass = sct.seq.sum_mass(resids, res_freq)\n bH_tot = sct.seq.sum_b(resids, res_freq, False)\n bD_tot = sct.seq.sum_b(resids, res_freq, True)\n\n # whole = is this calculation for the entire glyco protein\n # If it is we will be printing different values\n whole = False\n\n if len(resids) != len(sct.seq.all_residues):\n\n total_mass = sct.seq.sum_mass(sct.seq.all_residues, res_freq)\n\n frac_mass = 100 * mass / total_mass\n\n out.write(\n \"Molecular Weight: {0:5.0f} Fraction of Total: {1:3.2f} Residues: {2:4d}\\n\".format(\n mass,\n frac_mass,\n no_res))\n\n else:\n\n whole = True\n\n out.write(\"Molecular Weight: {0:5.0f}\\n\".format(mass))\n\n abs_coeffs = sct.seq.calc_absorption_coeffs(res_freq, mass)\n\n out.write(\n \"Absorption coefficient (280 nM): {0:7.3f}\\n\".format(\n abs_coeffs[0]))\n out.write(\n 
\"Absorption coefficient x 1.03: {0:7.3f}\\n\".format(\n abs_coeffs[1]))\n out.write(\n \"Absorption coefficient x 1.06: {0:7.3f}\\n\".format(\n abs_coeffs[2]))\n\n # Calculate the contribution of the hydration layer to the\n # scattering length (b)\n vol_diff, oh_diff = sct.seq.calc_hydration_effect(res_freq)\n\n no_prot_res = sct.seq.sum_res_no(sct.seq.amino_acids, res_freq)\n hydra_per_res = oh_diff / no_prot_res\n\n hydra_delta = sct.seq.params['solvent']['vol_bound'] * oh_diff\n\n bH_tot_hydr = bH_tot + (sct.seq.params['solvent']['BOH'] * oh_diff)\n bD_tot_hydr = bD_tot + (sct.seq.params['solvent']['BOD'] * oh_diff)\n\n out.write(\n \"Total b in H2O: {0:8.3f} D2O: {1:8.3f}\\n\".format(\n bH_tot,\n bD_tot))\n out.write(\"Total b on M in H2O: {0:8.6f} D2O: {1:8.6f}\\n\".format(\n bH_tot / mass, bD_tot / mass))\n\n out.write(\n \"Scattering density of water H2O: {0:7.6f} D2O: {1:7.6f}\\n\".format(\n sct.seq.params['solvent']['BHHO'],\n sct.seq.params['solvent']['BDDO']))\n\n total_electrons = sct.seq.sum_electrons(resids, res_freq)\n out.write(\"Total no. electrons:\\t\\t{0:10.0f}\\n\".format(total_electrons))\n\n out.write(\"Electron density of water:\\t{0:10.6f}\\n\".format(\n sct.seq.params['solvent']['EHHO']))\n\n # Sort the names of the volume datasets for output\n vol_datasets = sorted(six.iterkeys(sct.seq.res_vols))\n\n out.write(\n create_volume_title(\n \" \",\n \" \",\n vol_datasets,\n 'aa'))\n vol_line = \"Volume \"\n spec_v_line = \"Specific Volume \"\n match_line = \"Match Point \"\n scat_line = \"Scattering Density at MPt \"\n elect_line = \"Electron Density \"\n\n if whole:\n hyd_vol_line = \"Volume \"\n hyd_match_line = \"Match Point \"\n\n # Create lines containing data for output\n for dataset in vol_datasets:\n tot_volume = sct.seq.sum_volume(resids, res_freq, dataset)\n vol_line += ' {0:7.0f}'.format(tot_volume)\n\n specific_volume = sct.seq.spec_volume(resids, res_freq, dataset)\n spec_v_line += ' {0:7.4f}'.format(specific_volume)\n\n match_point = calc_match_point(tot_volume, bH_tot, bD_tot)\n match_line += ' {0:7.2f}'.format(match_point)\n\n scat_density = calc_mpt_scattering_density(match_point)\n scat_line += ' {0:7.5f}'.format(scat_density)\n\n elect_density = sct.seq.sum_electrons(resids, res_freq) / tot_volume\n elect_line += ' {0:7.5f}'.format(elect_density)\n\n # Additional lines about protein hydration for the full protein\n if whole:\n hydr_vol = tot_volume + hydra_delta\n hyd_vol_line += ' {0:7.0f}'.format(hydr_vol)\n hydr_match_point = calc_match_point(\n hydr_vol,\n bH_tot_hydr,\n bD_tot_hydr)\n hyd_match_line += ' {0:7.2f}'.format(hydr_match_point)\n\n out.write(vol_line + '\\n')\n out.write(spec_v_line + '\\n')\n out.write(match_line + '\\n')\n out.write(scat_line + '\\n')\n out.write(elect_line + '\\n')\n\n if whole:\n out.write(\n \"********* HYDRATION OF TOTAL GLYCOPROTEIN BY OH GROUPS *********************************\\n\")\n out.write(\n \"Difference in CHO75 and CON85 Volumes: {0:7.0f} Total of equivalent bound H2O: {1:7.0f}\\n\".format(\n vol_diff,\n oh_diff))\n out.write(\n \"Average H20 per AA Residue: {0:7.2f}\\n\".format(hydra_per_res))\n out.write(\n \"Total b in H2O: {0:8.3f} D2O: {1:8.3f}\\n\".format(\n bH_tot_hydr,\n bD_tot_hydr))\n out.write(\n create_volume_title(\n \" \",\n \" \",\n vol_datasets,\n 'aa'))\n out.write(hyd_vol_line + '\\n')\n out.write(hyd_match_line + '\\n')", "def collect_process_statistics(N, total_bp1, total_bp2, modifiers, filters, formatters):\n stats = defaultdict(lambda: [0,0])\n \n written, written_bp = 
formatters.summary()\n assert written is not None\n \n stats.update(dict(\n N=N,\n total_bp1=total_bp1,\n total_bp2=total_bp2,\n total_bp=total_bp1 + total_bp2,\n written=written,\n written_bp=written_bp,\n total_written_bp=sum(written_bp),\n ))\n \n stats[\"too_short\"] = None\n if TooShortReadFilter in filters:\n stats[\"too_short\"] = filters[TooShortReadFilter].filtered\n \n stats[\"too_long\"] = None\n if TooLongReadFilter in filters:\n stats[\"too_long\"] = filters[TooLongReadFilter].filtered\n \n stats[\"too_many_n\"] = None\n if NContentFilter in filters:\n stats[\"too_many_n\"] = filters[NContentFilter].filtered\n \n # TODO: generalize this\n if modifiers.has_modifier(InsertAdapterCutter):\n insert_cutter = modifiers.get_modifiers(InsertAdapterCutter)[0]\n stats[\"with_adapters\"] = insert_cutter.with_adapters\n stats[\"corrected\"] = insert_cutter.corrected_pairs\n stats[\"corrected_bp\"] = insert_cutter.corrected_bp\n stats[\"total_corrected_bp\"] = sum(insert_cutter.corrected_bp)\n else:\n stats[\"with_adapters\"] = [0, 0]\n if modifiers.has_modifier(AdapterCutter):\n adapter_cutters = modifiers.get_modifiers(AdapterCutter)[0]\n for read, modifier in enumerate(adapter_cutters):\n if modifier:\n stats[\"with_adapters\"][read] += modifier.with_adapters\n \n for modifier_class in modifiers.get_trimmer_classes():\n for modifier in modifiers.get_modifiers(modifier_class, 1):\n key = \"{}_bp\".format(type(modifier).__name__)\n stats[key][0] = modifier.trimmed_bases\n \n for modifier in modifiers.get_modifiers(modifier_class, 2):\n key = \"{}_bp\".format(type(modifier).__name__)\n stats[key][1] = modifier.trimmed_bases\n \n name = modifier_class.__name__\n stats[name] = sum(stats[\"{}_bp\".format(name)])\n \n return dict(stats)", "def printConstants(self):\n print('h = {:.14e} Js'.format(const.h))\n print('c = {:.14e} m/s'.format(const.c))\n print('k = {:.14e} J/K'.format(const.k))\n print('q = {:.14e} C'.format(const.e))\n\n print(' ')\n print('pi = {:.14e}'.format(const.pi))\n print('e = {:.14e}'.format(numpy.exp(1)))\n print('zeta(3) = {:.14e}'.format(self.zeta3 ))\n print('a2 = {:.14e}, root of 2(1-exp(-x))-x'.format(self.a2 ))\n print('a3 = {:.14e}, root of 3(1-exp(-x))-x'.format(self.a3 ))\n print('a4 = {:.14e}, root of 4(1-exp(-x))-x'.format(self.a4 ))\n print('a5 = {:.14e}, root of 5(1-exp(-x))-x'.format(self.a5 ))\n\n print(' ')\n print('sigmae = {:.14e} W/(m^2 K^4)'.format(self.sigmae))\n print('sigmaq = {:.14e} q/(s m^2 K^3)'.format(self.sigmaq))\n print(' ')\n print('c1em = {:.14e} with wavelenth in m'.format(self.c1em))\n print('c1qm = {:.14e} with wavelenth in m'.format(self.c1qm))\n print('c2m = {:.14e} with wavelenth in m'.format(self.c2m))\n print(' ')\n print('c1el = {:.14e} with wavelenth in $\\mu$m'.format(self.c1el))\n print('c1ql = {:.14e} with wavelenth in $\\mu$m'.format(self.c1ql))\n print('c2l = {:.14e} with wavelenth in $\\mu$m'.format(self.c2l))\n print(' ')\n print('c1en = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c1en))\n print('c1qn = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c1qn))\n print('c2n = {:.14e} with wavenumber in cm$^{{-1}}$'.format(self.c2n))\n print(' ')\n print('c1ef = {:.14e} with frequency in Hz'.format(self.c1ef))\n print('c1nf = {:.14e} with frequency in Hz'.format(self.c1nf))\n print('c2f = {:.14e} with frequency in Hz'.format(self.c2f))\n print(' ')\n print('wel = {:.14e} um.K'.format(self.wel))\n print('wql = {:.14e} um.K'.format(self.wql))\n print('wen = {:.14e} cm-1/K'.format(self.wen))\n print('wqn = {:.14e} 
cm-1/K'.format(self.wqn))\n print('wef = {:.14e} Hz/K'.format(self.wef))\n print('wqf = {:.14e} Hz/K'.format(self.wqf))\n print(' ')", "def output_sampled_params_table(self):\n # get length of longest display name\n max_length = max([len(display_name) for display_name in self.optimization_defs.keys()])\n left_sep = \"| \"\n right_sep = \" |\"\n center_sep = \" | \"\n with open(\"sampled_params.md\", 'w') as table_file:\n # Write table headers\n table_file.write(left_sep)\n for i, display_name in enumerate(self.optimization_defs.keys()):\n table_file.write(display_name.rjust(max_length, ' ')) # rjust fills string with spaces\n # write center or right separator, depending on whether we're at the last element\n table_file.write((center_sep if not i == len(self.optimization_defs.keys()) - 1 else right_sep))\n # Write table header separator\n table_file.write('\\n' + left_sep)\n for i in range(len(self.optimization_defs)):\n # the colon position defines alignment of column text, in this case to the right\n table_file.write('-' * max_length + (\":| \" if not i == len(self.optimization_defs.keys()) - 1 else \":|\"))\n # For each sample, create a row\n for x in self.optimizer.X:\n # Write sample's row\n table_file.write('\\n' + left_sep)\n for i, display_name in enumerate(self.optimization_defs.keys()):\n param_value = round(x[self._to_optimizer_id(display_name)], self._params['rounding_decimal_places'])\n table_file.write(str(param_value).rjust(max_length, ' '))\n # write center or right separator, depending on whether we're at the last element\n table_file.write((center_sep if not i == len(self.optimization_defs.keys()) - 1 else right_sep))", "def apbsWriteGridParams(self, fp):\n fp.write('\\tdime %d %d %d\\n\\n'%(\n self.gridPointsX,self.gridPointsY, self.gridPointsZ))\n fp.write('\\tcglen %.3f %.3f %.3f\\n'%(\n self.coarseLengthX,self.coarseLengthY, self.coarseLengthZ))\n fp.write('\\tcgcent %.3f %.3f %.3f\\n'%(\n self.coarseCenterX,self.coarseCenterY, self.coarseCenterZ))\n fp.write('\\tfglen %.3f %.3f %.3f\\n'%(\n self.fineLengthX,self.fineLengthY, self.fineLengthZ))\n fp.write('\\tfgcent %.3f %.3f %.3f\\n'%(\n self.fineCenterX,self.fineCenterY, self.fineCenterZ))\n fp.write('\\n')", "def fBlock(self, tag, atts, ext, cite, content):\r\n atts = self.pba(atts)\r\n o1 = o2 = c2 = c1 = ''\r\n\r\n m = re.search(r'fn(\\d+)', tag)\r\n if m:\r\n tag = 'p'\r\n if m.group(1) in self.fn:\r\n fnid = self.fn[m.group(1)]\r\n else:\r\n fnid = m.group(1)\r\n atts = atts + ' id=\"fn%s\"' % fnid\r\n if atts.find('class=') < 0:\r\n atts = atts + ' class=\"footnote\"'\r\n content = ('<sup>%s</sup>' % m.group(1)) + content\r\n\r\n if tag == 'bq':\r\n cite = self.checkRefs(cite)\r\n if cite:\r\n cite = ' cite=\"%s\"' % cite\r\n else:\r\n cite = ''\r\n o1 = \"\\t<blockquote%s%s>\\n\" % (cite, atts)\r\n o2 = \"\\t\\t<p%s>\" % atts\r\n c2 = \"</p>\"\r\n c1 = \"\\n\\t</blockquote>\"\r\n\r\n elif tag == 'bc':\r\n o1 = \"<pre%s>\" % atts\r\n o2 = \"<code%s>\" % atts\r\n c2 = \"</code>\"\r\n c1 = \"</pre>\"\r\n content = self.shelve(self.encode_html(content.rstrip(\"\\n\") +\r\n \"\\n\"))\r\n\r\n elif tag == 'notextile':\r\n content = self.shelve(content)\r\n o1 = o2 = ''\r\n c1 = c2 = ''\r\n\r\n elif tag == 'pre':\r\n content = self.shelve(self.encode_html(content.rstrip(\"\\n\") +\r\n \"\\n\"))\r\n o1 = \"<pre%s>\" % atts\r\n o2 = c2 = ''\r\n c1 = '</pre>'\r\n\r\n else:\r\n o2 = \"\\t<%s%s>\" % (tag, atts)\r\n c2 = \"</%s>\" % tag\r\n\r\n content = self.graf(content)\r\n return o1, o2, content, c2, c1", "def 
get_fit_instructions():\n\n dust = {}\n dust[\"type\"] = \"CF00\"\n dust[\"eta\"] = 2.\n dust[\"Av\"] = (0., 8.0)\n dust[\"n\"] = (0.3, 1.5)\n dust[\"n_prior\"] = \"Gaussian\"\n dust[\"n_prior_mu\"] = 0.7\n dust[\"n_prior_sigma\"] = 0.3\n\n nebular = {}\n nebular[\"logU\"] = -3.\n\n dblplaw = {}\n dblplaw[\"massformed\"] = (0., 13.)\n dblplaw[\"metallicity\"] = (0.01, 2.5)\n dblplaw[\"metallicity_prior\"] = \"log_10\"\n dblplaw[\"alpha\"] = (0.01, 1000.)\n dblplaw[\"alpha_prior\"] = \"log_10\"\n dblplaw[\"beta\"] = (0.01, 1000.)\n dblplaw[\"beta_prior\"] = \"log_10\"\n dblplaw[\"tau\"] = (0.1, 15.)\n\n noise = {}\n noise[\"type\"] = \"GP_exp_squared\"\n noise[\"scaling\"] = (0.1, 10.)\n noise[\"scaling_prior\"] = \"log_10\"\n noise[\"norm\"] = (0.0001, 1.)\n noise[\"norm_prior\"] = \"log_10\"\n noise[\"length\"] = (0.01, 1.)\n noise[\"length_prior\"] = \"log_10\"\n\n calib = {}\n calib[\"type\"] = \"polynomial_bayesian\"\n\n calib[\"0\"] = (0.5, 1.5)\n calib[\"0_prior\"] = \"Gaussian\"\n calib[\"0_prior_mu\"] = 1.\n calib[\"0_prior_sigma\"] = 0.25\n\n calib[\"1\"] = (-0.5, 0.5)\n calib[\"1_prior\"] = \"Gaussian\"\n calib[\"1_prior_mu\"] = 0.\n calib[\"1_prior_sigma\"] = 0.25\n\n calib[\"2\"] = (-0.5, 0.5)\n calib[\"2_prior\"] = \"Gaussian\"\n calib[\"2_prior_mu\"] = 0.\n calib[\"2_prior_sigma\"] = 0.25\n\n fit_instructions = {}\n fit_instructions[\"dust\"] = dust\n fit_instructions[\"dblplaw\"] = dblplaw\n fit_instructions[\"noise\"] = noise\n fit_instructions[\"calib\"] = calib\n fit_instructions[\"nebular\"] = nebular\n fit_instructions[\"redshift\"] = (0., 10.)\n fit_instructions[\"t_bc\"] = 0.01\n fit_instructions[\"veldisp\"] = (40., 400.)\n fit_instructions[\"veldisp_prior\"] = \"log_10\"\n\n return fit_instructions", "def create_body(self):\n for pos in STARTING_POS:\n self.add_fragment(pos)", "def apbsWritePhysicsParams(self, fp):\n #fp.write('\\tgamma %.3f\\n'%(self.GAMMA)) # NOTE: CONSTANT\n fp.write('\\ttemp %.3f\\n'%(self.systemTemperature))\n fp.write('\\tsrad %.3f\\n'%(self.solventRadius))\n fp.write('\\tsdie %.3f\\n'%(self.solventDielectric))\n fp.write('\\tpdie %.3f\\n'%(self.proteinDielectric))\n for i in range(0, len(self.ions)):\n fp.write('\\tion %s\\n'%(self.ions[i].toString()))\n if self.saltConcentration:\n fp.write('\\tion 1.000, %.3f, 2.000\\n'%(self.saltConcentration))\n fp.write('\\tion -1.000, %.3f, 2.000\\n'%(self.saltConcentration))\n fp.write('\\n')", "def _format_callee(func_width):\n # Unintentional, but both the summary and callee lines\n # contain the same layout of cells. 
Keeping the function\n # calls separate in case I ever need to change 'em.\n return MergedStats._format_summary(func_width)", "def _fill_non_empty_info(self) -> None:\n self.add_object_type_line()\n self.add_index_range_line()\n self.add_series_name_line()\n self.add_header_line()\n self.add_separator_line()\n self.add_body_lines()\n self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()", "def make_model_summary(self):\n logger.info(f'Generating model summary for {self.model_name}.')\n self.json_stats['model_summary'] = {\n 'model_name': self.model_name,\n 'number_of_statements': self.latest_round.get_total_statements(),\n 'stmts_type_distr': self.latest_round.get_statement_types(),\n 'agent_distr': self.latest_round.get_agent_distribution(),\n 'stmts_by_evidence': self.latest_round.get_statements_by_evidence(),\n 'all_stmts': self.latest_round.get_english_statements_by_hash()\n }", "def _format_summary(func_width):\n return ( \"{function:<\" + str(func_width) + \"}\"\n \"\\t{ncalls_a:>8}\"\n \"\\t{ncalls_b:>8}\"\n \"\\t{cumtime_a:>9}\"\n \"\\t{cumtime_b:>9}\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create footer block of estimation table. Applies number formatting to parameters and summary statistics. Concatenates inference values to parameter values if applicable, and adds significance stars if applicable.
def _build_estimation_table_footer( models, stats_options, significance_levels, show_stars, number_format, add_trailing_zeros, max_trail, ): to_concat = [ _create_statistics_sr( mod, stats_options, significance_levels, show_stars, number_format, add_trailing_zeros, max_trail, ) for mod in models ] stats = pd.concat(to_concat, axis=1) return stats
[ "def generate_footer_html(self):\n footer = '<td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n\n if not self.show_footer:\n return footer\n footer += self.generate_observations_html()\n footer += self.generate_r2_html()\n footer += self.generate_r2_adj_html()\n if self.show_residual_std_err:\n footer += self.generate_resid_std_err_html()\n if self.show_f_statistic:\n footer += self.generate_f_statistic_html()\n footer += '<tr><td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n footer += self.generate_notes_html()\n footer += '</table>'\n\n return footer", "def _Footer():\n return \"\\\\end{tabular}\\n\\\\end{center}\"", "def _get_estimation_table_body_and_footer(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n significance_levels,\n stats_options,\n show_col_names,\n show_col_groups,\n show_stars,\n show_inference,\n confidence_intervals,\n number_format,\n add_trailing_zeros,\n):\n body, max_trail = _build_estimation_table_body(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n show_col_names,\n show_col_groups,\n show_inference,\n show_stars,\n confidence_intervals,\n significance_levels,\n number_format,\n add_trailing_zeros,\n )\n footer = _build_estimation_table_footer(\n models,\n stats_options,\n significance_levels,\n show_stars,\n number_format,\n add_trailing_zeros,\n max_trail,\n )\n footer.columns = body.columns\n return body, footer", "def WriteFooter(self):\n pass", "def print_table_footer():\n\n print('''\\\\bottomrule\n\\\\end{tabular}\n}\n\\\\end{center}\n\\\\end{table}\\n\\n''')", "def add_footer(table: LongTable, len_data: int):\n\n table_style = TableStyle([('FONTNAME', (0, len_data - 2), (-1, len_data - 1), 'Courier-Bold'),\n ('BACKGROUND', (0, len_data - 2), (-1, len_data - 1), colors.sandybrown)\n ])\n table.setStyle(table_style)", "def print_latex_footer():\n print(\n \"\"\"\\\\bottomrule\n\\\\end{tabular}\n\\\\end{center}\n\\\\end{Large}\n\\\\end{document}\"\"\"\n )", "def createFileFooter(self):\n import_file_desc_h = open('xml_footer.txt', 'r')\n readlines = import_file_desc_h.read()\n self.fileDesXmlData.write(readlines)\n import_file_desc_h.close()", "def _add_footer(self, line, align, text, *extra):\n k = self._text_width() - 1 - len(text)\n pos = max(0, (0 if align == \"left\" else (k if align == \"right\" else k //2 )))\n self._footer.addnstr(line, pos, text, self._text_width() - 1 - pos, *extra)", "def _writeText(self,output,outputDictionary,methodToTest,separator=' '):\n if self.dynamic:\n output.write('Dynamic BasicStatistics'+ separator+ 'Pivot Parameter' + separator + self.pivotParameter + separator + os.linesep)\n quantitiesToWrite = {}\n outputResults = [outputDictionary] if not self.dynamic else outputDictionary.values()\n longestParam = max(list(len(param) for param in self.allUsedParams)+[9]) #9 is for 'Metric:'\n # use format functions to make writing matrices easier\n paramFormat = ('{:>'+str(longestParam)+'.'+str(longestParam)+'}').format\n for ts, outputDict in enumerate(outputResults):\n if self.dynamic:\n output.write('Pivot Value' +separator+ str(outputDictionary.keys()[ts]) + os.linesep)\n # do scalars metrics first\n #header\n haveScalars = list(scalar for scalar in self.scalarVals if scalar in outputDict.keys())\n if 'percentile_map' in self.parameters and len(self.parameters['percentile_map']) >0 :\n haveScalars = haveScalars + ['percentile_'+val for 
val in self.parameters['percentile_map'].values()]\n if len(haveScalars) > 0:\n longestScalar = max(18,max(len(scalar) for scalar in haveScalars))\n valueStrFormat = ('{:^22.22}').format\n valueFormat = '{:+.15e}'.format\n output.write(paramFormat('Metric:') + separator)\n output.write(separator.join(valueStrFormat(scalar) for scalar in haveScalars) + os.linesep)\n #body\n for param in self.allUsedParams:\n output.write(paramFormat(param) + separator)\n values = [None]*len(haveScalars)\n for s,scalar in enumerate(haveScalars):\n if param in outputDict.get(scalar,{}).keys():\n values[s] = valueFormat(outputDict[scalar][param])\n else:\n values[s] = valueStrFormat('---')\n output.write(separator.join(values) + os.linesep)\n # then do vector metrics (matrix style)\n haveVectors = list(vector for vector in self.vectorVals if vector in outputDict.keys())\n for vector in haveVectors:\n #label\n output.write(os.linesep + os.linesep)\n output.write(vector+':'+os.linesep)\n #header\n vecTargets = sorted(outputDict[vector].keys())\n output.write(separator.join(valueStrFormat(v) for v in [' ']+vecTargets)+os.linesep)\n #populate feature list\n vecFeatures = set()\n list(vecFeatures.update(set(outputDict[vector][t].keys())) for t in vecTargets)\n vecFeatures = sorted(list(vecFeatures))\n #body\n for feature in vecFeatures:\n output.write(valueStrFormat(feature)+separator)\n values = [valueStrFormat('---')]*len(vecTargets)\n for t,target in enumerate(vecTargets):\n if feature in outputDict[vector][target].keys():\n values[t] = valueFormat(outputDict[vector][target][feature])\n output.write(separator.join(values)+os.linesep)", "def write_account_footer(self, account, name_value):\n format_amt = self._get_currency_amt_header_format(account)\n for col_pos, column in self.columns.items():\n if column['field'] == 'name':\n value = name_value\n else:\n value = getattr(account, column['field'])\n cell_type = column.get('type', 'string')\n if cell_type == 'string':\n self.sheet.write_string(self.row_pos, col_pos, value or '',\n self.format_header_left)\n elif cell_type == 'amount':\n self.sheet.write_number(self.row_pos, col_pos, float(value),\n self.format_header_amount)\n elif cell_type == 'many2one':\n self.sheet.write_string(\n self.row_pos, col_pos, value.name or '',\n self.format_header_right)\n elif cell_type == 'amount_currency' and account.currency_id:\n self.sheet.write_number(\n self.row_pos, col_pos, float(value),\n format_amt)\n else:\n self.sheet.write_string(\n self.row_pos, col_pos, '',\n self.format_header_right)\n self.row_pos += 1", "def End(self):\n self.Flush()\n\n if self.formatter:\n self.formatter.WriteFooter()\n self.formatter.Close()", "def footer(self):\n output = \"\"\"\n</body> \n</html> \n\"\"\"\n return output", "def create_footer_from_template(self):\r\n self.footer_lines = []\r\n for line in _Templates().footer.split(\"\\n\"):\r\n line = line.replace(\" \", \"\\t\")\r\n self.footer_lines.append(line + \"\\n\")", "def draw_footer(canvas,pdf):\n oBank=get_object_or_404(SYSTEM_BANK, pk=1)\n oCompany=get_object_or_404(SYSTEM_MAIN, pk=1)\n oCompany=get_object_or_404(SYSTEM_MAIN, pk=1)\n note = (\n u'Bank Details: '+oBank.BankName+'',\n u'Sort Code: '+oBank.BankSortCode+' Account No: '+oBank.BankAccountNo+' (Quote invoice number).',\n u\"Please pay via bank transfer or cheque. 
All payments should be made in \"+oBank.Curency.Name+\"'s.\",\n u'Make cheques payable to '+oCompany.CompanyName+'.',\n )\n textobject = canvas.beginText(1 * cm, -27 * cm)\n for line in note:\n textobject.textLine(line)\n canvas.drawText(textobject)", "def write(self) -> None:\n table = self.loan\n print(\n tabulate(\n table,\n headers=self.header,\n floatfmt=\",.2f\",\n numalign=\"right\",\n tablefmt=\"rst\",\n )\n )", "def produce_header_footer():\n header = pl.PageStyle(\"header\", header_thickness=0.1)\n\n image_filename = get_image()\n with header.create(pl.Head(\"L\")) as logo:\n logo.append(pl.StandAloneGraphic(image_options=\"width=110px\", filename=image_filename))\n\n # Date\n with header.create(pl.Head(\"R\")):\n header.append(\"Date Report Issued: \" + datetime.today().strftime('%Y-%m-%d'))\n\n # Footer\n with header.create(pl.Foot(\"C\")):\n with header.create(pl.Tabular('lcr')) as table:\n table.add_row('', bold('Data interpretation guidelines can be found in RDIMS document ID: 10401305'), '')\n table.add_row('', bold('This report was generated with OLC AutoROGA v0.0.1'), '')\n return header", "def print_footer():\n sys.stdout.write(linesep + linesep)\n sys.stdout.write('longest file: %s' % globals.longest_file + linesep)\n sys.stdout.write('deepest path: %s' % globals.deepest_path + linesep)", "def output_sampled_params_table(self):\n # get length of longest display name\n max_length = max([len(display_name) for display_name in self.optimization_defs.keys()])\n left_sep = \"| \"\n right_sep = \" |\"\n center_sep = \" | \"\n with open(\"sampled_params.md\", 'w') as table_file:\n # Write table headers\n table_file.write(left_sep)\n for i, display_name in enumerate(self.optimization_defs.keys()):\n table_file.write(display_name.rjust(max_length, ' ')) # rjust fills string with spaces\n # write center or right separator, depending on whether we're at the last element\n table_file.write((center_sep if not i == len(self.optimization_defs.keys()) - 1 else right_sep))\n # Write table header separator\n table_file.write('\\n' + left_sep)\n for i in range(len(self.optimization_defs)):\n # the colon position defines alignment of column text, in this case to the right\n table_file.write('-' * max_length + (\":| \" if not i == len(self.optimization_defs.keys()) - 1 else \":|\"))\n # For each sample, create a row\n for x in self.optimizer.X:\n # Write sample's row\n table_file.write('\\n' + left_sep)\n for i, display_name in enumerate(self.optimization_defs.keys()):\n param_value = round(x[self._to_optimizer_id(display_name)], self._params['rounding_decimal_places'])\n table_file.write(str(param_value).rjust(max_length, ' '))\n # write center or right separator, depending on whether we're at the last element\n table_file.write((center_sep if not i == len(self.optimization_defs.keys()) - 1 else right_sep))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reindex all params DataFrames with a common index and apply number formatting.
def _reindex_and_float_format_params( models, show_inference, confidence_intervals, number_format, add_trailing_zeros ): dfs = _get_params_frames_with_common_index(models) cols_to_format = _get_cols_to_format(show_inference, confidence_intervals) formatted_frames, max_trail = _apply_number_formatting_frames( dfs, cols_to_format, number_format, add_trailing_zeros ) return formatted_frames, max_trail
[ "def _re_number(self):\n new_dataset_indices = []\n for g, graph in enumerate(self.graphs):\n graph._force_index(g)\n for s, graph_set in enumerate(graph.sets):\n graph_set._force_index(s)\n new_dataset_indices.append((g,s))\n for i, dataset in enumerate(self.datasets):\n dataset._force_index(*new_dataset_indices[i])", "def reindex(ctx):\n\n request = ctx.obj['bootstrap']()\n\n index.reindex(request.db, request.es, request)", "def reindexObject(idxs=[]):", "def re_index_types(self):\n\n for (index, atom_type) in enumerate(self.atom_types, 1):\n atom_type.index = index\n\n for (index, bond_type) in enumerate(self.bond_types, 1):\n bond_type.index = index\n\n for (index, angle_type) in enumerate(self.angle_types, 1):\n angle_type.index = index\n\n index = 1\n for dihedral_type in self.dihedral_types:\n if isinstance(dihedral_type.index, list):\n for i in range(len(dihedral_type.index)):\n dihedral_type.index[i] = index\n index += 1\n else:\n dihedral_type.index = index\n index += 1\n\n for (index, improper_type) in enumerate(self.improper_types, 1):\n improper_type.index = index", "def patch_index(updates):\n updates_str = [\"%s: %d items\" % (k, len(v)) for k, v in updates.items()]\n logger.info(\"Patching indices (%s)\" % \", \".join(updates_str))\n\n indexers = _get_registered()\n for doc_type, pk_strs in updates.items():\n for indexer in indexers:\n if indexer.doc_type == doc_type:\n _patch_index(\n indexer, [indexer.pk_from_string(pk_str) for pk_str in pk_strs]\n )", "def propagate_format_to_cst(self, optree, new_optree_format, index_list = []):\n index_list = xrange(len(optree.inputs)) if index_list == [] else index_list\n for index in index_list:\n inp = optree.inputs[index]\n if isinstance(inp, Constant) and isinstance(inp.get_precision(), ML_AbstractFormat):\n inp.set_precision(new_optree_format)", "def _update_index(datatable, index, old_index=None):\n _check_index(datatable._dataframe, index)\n if old_index is not None:\n datatable._update_columns({old_index: datatable.columns[old_index].remove_semantic_tags('index')})\n datatable.columns[index]._set_as_index()", "def reindex(self):\n for wh in self.__class__.whoosheers:\n writer = wh.index.writer(timeout=self.writer_timeout)\n for model in wh.models:\n method_name = \"update_{0}\".format(model.__name__.lower())\n for item in model.query.all():\n getattr(wh, method_name)(writer, item)\n writer.commit()", "def normalize_index(df1, df2):\n\n index = df2.index if df2.shape[0] > df1.shape[0] else df1.index\n df1 = df1.reindex(index).fillna(0)\n df2 = df2.reindex(index).fillna(0)\n\n return df1, df2", "def _set_index(self, df):\n self.logger.info(\"Set up index.\")\n\n target = df.columns[-1]\n\n if getattr(self, \"index\", True) is True: # True gets caught by isinstance(int)\n pass\n elif self.index is False:\n df = df.reset_index(drop=True)\n elif isinstance(self.index, int):\n if -df.shape[1] <= self.index <= df.shape[1]:\n df = df.set_index(df.columns[self.index], drop=True)\n else:\n raise ValueError(\n f\"Invalid value for the index parameter. Value {self.index} \"\n f\"is out of range for a dataset with {df.shape[1]} columns.\"\n )\n elif isinstance(self.index, str):\n if self.index in df:\n df = df.set_index(self.index, drop=True)\n else:\n raise ValueError(\n \"Invalid value for the index parameter. \"\n f\"Column {self.index} not found in the dataset.\"\n )\n\n if df.index.name == target:\n raise ValueError(\n \"Invalid value for the index parameter. 
The index column \"\n f\"can not be the same as the target column, got {target}.\"\n )\n\n if df.index.duplicated().any():\n raise ValueError(\n \"Invalid value for the index parameter. There are duplicate indices \"\n \"in the dataset. Use index=False to reset the index to RangeIndex.\"\n )\n\n return df", "def _get_params_frames_with_common_index(models):\n dfs = [model[\"params\"] for model in models]\n common_index = _get_common_index(dfs)\n out = [model[\"params\"].reindex(common_index) for model in models]\n return out", "def reset_index(self):\n self.index = range(self.values.shape[1])", "def reindex_all_sample_id_to_patient_id(data_dict, reindex_map, additional_to_keep_col=[], skip=[]):\n if isinstance(skip, str): # If it's a single dataframe name, make it a list so we can treat everything the same\n skip = [skip]\n\n dfs_to_delete = []\n dfs_to_keep_col = [\"clinical\"] + additional_to_keep_col\n\n for name in data_dict.keys(): # Only loop over keys, to avoid changing the structure of the object we're looping over\n\n # Skip any specified to skip\n if name in skip:\n continue\n\n df = data_dict[name]\n df.index.name = \"Sample_ID\" # So that it's labeled properly when we keep it as a column in the clinical dataframe.\n keep_old = name in dfs_to_keep_col # Keep the old Patient_ID index as a column in the clinical dataframe (and any additionally specified dataframes), so we have a record of it.\n\n try:\n df = reindex_dataframe(df, reindex_map, \"Patient_ID\", keep_old)\n except ReindexMapError:\n warnings.warn(f\"Error reindexing {name} dataframe. At least one Sample_ID did not have corresponding Patient_ID mapped in clinical dataframe. {name} dataframe not loaded.\", FailedReindexWarning, stacklevel=3) # stacklevel=3 ensures that the warning is registered as originating from the file that called the __init__ function, instead of from here directly, because the former is more useful information.\n dfs_to_delete.append(name)\n continue\n\n data_dict[name] = df\n\n for name in dfs_to_delete: # Delete any dataframes that had issues reindexing\n del data_dict[name]\n\n return data_dict", "def reindex(self):\n for idx, line in enumerate(self.line_map):\n line.index = idx\n if line.annotations:\n for x in line.annotations:\n x.line_num = idx", "def _marshall_index(proto, index):\n index = map(util._maybe_tuple_to_list, index.values)\n index_df = pd.DataFrame(index)\n proto.index = _dataframe_to_pybytes(index_df)", "def setFieldNumIndices(self, fldnumind: 'int const') -> \"void\":\n return _coin.SoNotRec_setFieldNumIndices(self, fldnumind)", "def update_index(self, entity, **kwargs):", "def format_df(df,value_1,value_2,index):\n result = pd.concat([pd.DataFrame(np.array([np.NAN,np.NAN,np.NAN]),columns=[str(value_1)],index=index),\n df,\n pd.DataFrame(np.array([np.NAN,np.NAN,np.NAN]),columns=[str(value_2)],index=index)],\n axis=1)\n return result", "def refresh_all_default_indices():\n for index_type in ALL_INDEX_TYPES:\n alias = get_default_alias(index_type)\n refresh_index(alias)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of params frames, reindexed with a common index.
def _get_params_frames_with_common_index(models): dfs = [model["params"] for model in models] common_index = _get_common_index(dfs) out = [model["params"].reindex(common_index) for model in models] return out
[ "def get_param_indices(state, modes, opcode_ix):\n return [state.intcode[opcode_ix+i] if mode == PARAM_MODE_POSITION else\n opcode_ix+i if mode == PARAM_MODE_IMMEDIATE else\n state.relative_base + state.intcode[opcode_ix+i]\n for i, mode in enumerate(modes, 1)]", "def getFrameList(self):\n\t\treturn self.frameList", "def prepare_test_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n results = self.sample_clip(results)\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n return self.pipeline(results)", "def _fetch_params(self, raw_params, param_modes, index_of_output_addr=None):\n output_addr = None\n if index_of_output_addr is not None:\n i = index_of_output_addr\n # Output should use POSITION or RELATIVE modes to indicate the addr\n # to write to.\n assert param_modes[i] != ParamMode.IMMEDIATE\n\n # Compute the parameter RELATIVE mode output\n p = raw_params[i]\n if param_modes[i] == ParamMode.POSITION:\n output_addr = p\n elif param_modes[i] == ParamMode.RELATIVE:\n output_addr = self._relative_base + p\n\n # make it IMMEDIATE so it just appends the parameter to ret\n param_modes[i] = ParamMode.IMMEDIATE\n\n ret = []\n for p, m in zip(raw_params, param_modes):\n if m == ParamMode.POSITION:\n ret.append(self._memory[p])\n elif m == ParamMode.IMMEDIATE:\n ret.append(p)\n elif m == ParamMode.RELATIVE:\n addr = self._relative_base + p\n assert addr >= 0\n ret.append(self._memory[addr])\n else:\n raise ValueError()\n\n # put the output address back\n if output_addr is not None:\n ret[index_of_output_addr] = output_addr\n\n return ret", "def get_all_params(self):\n for thli in self.running_threads:\n yield(thli[0],thli[1],thli[2])", "def prepare_train_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n results = self.sample_clip(results)\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n return self.pipeline(results)", "def extract_param_list(params, prefix=''):\r\n\r\n key_re = re.compile(r\"%s\\.member\\.([0-9]+)\\.(.*)\" % (prefix))\r\n\r\n def get_param_data(params):\r\n for param_name, value in params.items():\r\n match = key_re.match(param_name)\r\n if match:\r\n try:\r\n index = int(match.group(1))\r\n except ValueError:\r\n pass\r\n else:\r\n key = match.group(2)\r\n\r\n yield (index, (key, value))\r\n\r\n # Sort and group by index\r\n key_func = lambda d: d[0]\r\n data = sorted(get_param_data(params), key=key_func)\r\n members = itertools.groupby(data, key_func)\r\n\r\n return [dict(kv for di, kv in m) for mi, m in members]", "def _init_flatten_params(\n self,\n ) -> Tuple[List[nn.Parameter], List[ParamInfo], List[SharedParamInfo]]:\n param_infos: List[ParamInfo] = []\n shared_param_infos = []\n shared_param_memo: Dict[nn.Parameter, Tuple[str, nn.Module, str]] = {}\n params = []\n for module_name, m in self.named_modules():\n for n, p in m.named_parameters(recurse=False):\n if p is not None and (m, n) in self.param_set:\n if p in shared_param_memo:\n mname, shared_m, shared_n = shared_param_memo[p]\n shared_param_infos.append(\n (module_name, mname, m, n, shared_m, shared_n)\n )\n else:\n shared_param_memo[p] = (module_name, m, n)\n param_infos.append(ParamInfo(module_name, m, n))\n params.append(p)\n del shared_param_memo\n\n assert (\n len(set(p.dtype for p in params)) == 1\n ), \"expects all parameters to have same dtype\"\n assert (\n len(set(p.requires_grad for p in params)) == 1\n ), \"expects all parameters to have same requires_grad\"\n assert len(params) == 
len(set(params)), \"params list should not have dups\"\n\n return params, param_infos, shared_param_infos", "def get_all_from_original(self, idx):\n return [self.image(i) for i in range(idx * self._count,\n (idx+1) * self._count)]", "def get_frames(self) -> List[Frame]:\n return self.animations[self.active_animation]", "def get_frames(self, start, end):\n assert end >= start\n assert start >= 0\n\n result = []\n for i in xrange(start, end, 1):\n result.append(self.get_frame(i))\n return result", "def update_params_to_prune(\n self, exclude_param_index: Set[int]\n ) -> Tuple[Tuple[nn.Module, str], ...]:\n excluded_params_prune = []\n for tuple_index, (layer, type_) in enumerate(self.params_to_prune):\n if tuple_index not in exclude_param_index:\n excluded_params_prune.append((layer, type_))\n return tuple(excluded_params_prune)", "def __getitem__(self, key):\n if isinstance(key, slice):\n return [copy.deepcopy(self.seek_frame(idx)) for idx in xrange(*key.indices(len(self)))]\n else:\n return copy.deepcopy(self.seek_frame(key))", "def make_param_list(self):\n self.params, self.bn_layers = {}, {}\n\n for key in list(self.layers.keys()):\n self.params[key] = []\n self.bn_layers[key] = []\n for layer in self.layers[key]:\n if layer.get_params():\n self.params[key] += layer.get_params()\n if layer.__class__.__name__ == 'BatchNormLayer':\n self.bn_layers[key].append(layer)", "def set_params(df, params=None):\n if isinstance(df, pd.DataFrame):\n id_vars = df.columns.intersection(['dataset', 'regid', 'cfgid']).tolist()\n datasets = df.dataset.unique().tolist()\n regids = df.regid.unique().tolist()\n result = pd.DataFrame()\n for regid in regids:\n for dataset in datasets:\n if isinstance(params, pd.DataFrame):\n par = params[(params.dataset==dataset)&(params.regid==regid)]\n elif isinstance(params, dict):\n par = params[regid]\n if isinstance(par, dict):\n par = par[dataset]\n else:\n par = get_params(dataset, regid)\n par.set_index(id_vars, inplace=True)\n col = par.columns.difference(df.columns)\n result = pd.concat([result, pd.merge(df, par[col], left_on=id_vars, right_index=True, how='inner').reset_index(drop=True)])\n return result\n elif isinstance(df, dict):\n for m in df:\n df[m] = set_params(df[m])\n return df\n elif is_iterable(df):\n out = []\n for table in df:\n out.append(set_params(table))\n return out\n else:\n raise ValueError(\"Argument must be pandas.DataFrame, dict, or iterable\")", "def get_parameters(self, param_list=None):\n if param_list is None:\n parameters = copy.deepcopy(self._parameters)\n parameters['label_mapping'] = copy.deepcopy(self._label_mapping)\n return parameters\n\n param_dict = {}\n for param in param_list:\n if param in self._parameters:\n param_dict[param] = self._parameters.get(param)\n elif param == 'label_mapping':\n param_dict['label_mapping'] = self._label_mapping\n else:\n raise ValueError('`{}` does not exist as a parameter in {}.'.\n format(param, self.__class__.__name__))\n return copy.deepcopy(param_dict)", "def loadframes(self,startframe=None,endframe=None,skip=0):\n if not startframe: startframe=self.currframe\n if not endframe: endframe=self.nframe\n self.opencrd(startframe=startframe)\n nframes=(endframe-startframe)/(1+skip)\n N=nframes*self.nat*3\n self.frames=numpy.zeros(N).reshape(nframes,self.nat,3)\n jframe=0 #number of loaded frames\n iframe=startframe #number of frames read over\n for jframe in range(nframes):\n self.loadframe() #update self.frame\n self.frames[jframe]=self.frame\n self.skipframes(skip) #;print 'jframe=',jframe\n return 
None", "def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1", "def prepare_train_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n results['filename_tmpl'] = self.filename_tmpl\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n output = dict()\n # Step1: Sample frame and resizing\n results_weak = self.pipeline_weak(results)\n # Step2: Strong augmentation\n results_strong = self.pipeline_strong(copy.deepcopy(results_weak))\n\n # NOTE: For ImageNet knowledge distillation\n if 'imagenet_scores' in results_strong:\n # Randomly sample 1 frame for distillation\n fidx = np.random.permutation(results_strong['frame_inds'])[0]\n # NOTE: For Kinetics-100, we only have prob for some of the frames only\n try:\n prob = torch.from_numpy(results_strong['imagenet_scores'][fidx]['prob'])\n except:\n fidx = fidx//4*4+1\n prob = torch.from_numpy(results_strong['imagenet_scores'][fidx]['prob'])\n output['imagenet_prob'] = prob\n del results_strong['imagenet_scores']\n\n # Step3: Final formating\n results_weak = self.pipeline_format(results_weak)\n results_strong = self.pipeline_format(results_strong)\n\n output['label_unlabeled'] = results_weak['label']\n output['imgs_weak'] = results_weak['imgs']\n output['imgs_strong'] = results_strong['imgs']\n\n # NOTE: For ActorCutMix\n if 'human_mask' in results_strong:\n output['human_mask'] = results_strong['human_mask']\n\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get common index from a list of DataFrames.
def _get_common_index(dfs):
    common_index = []
    for d_ in dfs:
        common_index += [ind for ind in d_.index.to_list() if ind not in common_index]
    return common_index
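A minimal usage sketch of the document function above, assuming only pandas; the two sample DataFrames and their index labels are invented for illustration, and the helper is redefined locally so the snippet runs stand-alone:

```python
import pandas as pd

def _get_common_index(dfs):
    # order-preserving union of the index labels of all frames
    common_index = []
    for d_ in dfs:
        common_index += [ind for ind in d_.index.to_list() if ind not in common_index]
    return common_index

df1 = pd.DataFrame({"a": [1, 2]}, index=["alpha", "beta"])
df2 = pd.DataFrame({"b": [3, 4]}, index=["beta", "gamma"])
print(_get_common_index([df1, df2]))  # ['alpha', 'beta', 'gamma']
```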
[ "def ensure_same_indices(df1, df2): \n df1.index = df1.index.astype(int)\n df2.index = df2.index.astype(int)\n\n intersection_ = df1.index.intersection(df2.index)\n\n if len(intersection_) == 0: \n raise ValueError('DataFrames do not contain any shared years')\n \n if isinstance(df1, pd.Series): \n df1_new = df1.loc[intersection_]\n else:\n df1_new = df1.loc[intersection_, :]\n\n if isinstance(df2, pd.Series): \n df2_new = df2.loc[intersection_]\n else:\n df2_new = df2.loc[intersection_, :]\n\n\n return df1_new, df2_new", "def unionize_indices(dataset, exclude=[]):\n if isinstance(exclude, str): # If it's a single dataframe name, make it a list so we can treat everything the same\n exclude = [exclude]\n indices = [df.index for name, df in dataset.items() if name not in exclude]\n master_index = pd.Index([])\n for index in indices:\n master_index = master_index.union(index)\n master_index = master_index.drop_duplicates()\n \n return master_index", "def market_cap_index(df, df_list):\n index_list = [1000]\n my_list = [0]\n my_list.extend(list(range(len(df_list[0]))))\n mv_0 = reduce(lambda x, y: x + df_list[0][y]['Close'] * df_list[0][y]['supply'], my_list)\n\n for i in range(1, len(df[0])):\n\n my_list = [0]\n my_list.extend(list(range(len(df_list[i]))))\n\n ###\n name_list_0 = [df_list[i - 1][x]['coin_name'] for x in range(len(df_list[i - 1]))]\n name_list_1 = [df_list[i][x]['coin_name'] for x in range(len(df_list[i]))]\n\n cmv = 0\n\n if name_list_0 == name_list_1:\n cmv = reduce(lambda x, y: x + df_list[i - 1][y]['Close'] * (df_list[i][y]['supply'] - df_list[i - 1][y]['supply']), my_list)\n\n else:\n name_list_intersection = list(set(name_list_0) & set(name_list_1))\n name_list_add = list(set(name_list_1) - set(name_list_0))\n\n for c in range(len(df_list[i])):\n # this loop is to avoid column shifting problem\n if df_list[i][c]['coin_name'] in name_list_intersection:\n for cc in range(len(df_list[i - 1])):\n if df_list[i - 1][cc]['coin_name'] == df_list[i][c]['coin_name']:\n previous_c = cc\n\n cmv += df_list[i - 1][previous_c]['Close'] * (df_list[i][c]['supply'] - df_list[i - 1][previous_c]['supply'])\n elif df_list[i][c]['coin_name'] in name_list_add:\n cmv += df_list[i][c]['Close'] * df_list[i][c]['supply']\n ###\n mv_1 = reduce(lambda x, y: x + df_list[i][y]['Close'] * df_list[i][y]['supply'], my_list)\n\n divisor_0 = (mv_0 / 1000) if (i == 1) else divisor_1\n new_index = 1000 if (i == 1) else new_index\n divisor_1 = divisor_0 + cmv / new_index\n\n new_index = mv_1 / divisor_1\n index_list.append(new_index)\n\n mv_0 = mv_1\n\n df_index = pd.DataFrame(index_list, columns=[\"cap_index\"], index=df[0].index)\n\n return df_index", "def match_indexes(self, dts):\n return np.where(self.matches(dts))[0]", "def last_common_item(xs, ys):\n max_i = min(len(xs), len(ys)) - 1\n for i, (x, y) in enumerate(zip(xs, ys)):\n if x == y and (i == max_i or xs[i+1] != ys[i+1]):\n return i\n return -1", "def getSetIndex(SetList,Set):\n\t\tfor i in range( len(SetList)):\n\t\t\tif Grammar.SetEquals( SetList[i],Set):\n\t\t\t\treturn i\n\t\treturn -1", "def _find_dup_idx(self,df_in,msg):\n\n if not df_in.index.is_unique:\n df_dups = df.loc[df.groupby(level=0).size() > 1,:]\n self.add_mismatch(df_dups,msg)\n df_in = df_in.loc[self.dfc.index - dfc_n.index,:]\n return(df_in)", "def _get_params_frames_with_common_index(models):\n dfs = [model[\"params\"] for model in models]\n common_index = _get_common_index(dfs)\n out = [model[\"params\"].reindex(common_index) for model in models]\n return out", "def 
get_class_index(classes, class_list):\n\tfilter_index = [np.where(class_list == i)[0][0] for i in classes]\n\treturn filter_index", "def get_merged_column_index(num_col_df, num_col_df1, num_col_df2, col_df1=[], col_df2=[], joinkey_df1=[], joinkey_df2=[]):\n\n col_df1 = np.array(col_df1)\n col_df2 = np.array(col_df2)\n\n if num_col_df == num_col_df1 + num_col_df2: # merging keeps same old columns\n col_df2 += num_col_df1\n elif num_col_df == num_col_df1 + num_col_df2 + 1: # merging add column 'key_0' to the head\n col_df1 += 1\n col_df2 += num_col_df1 + 1\n elif num_col_df <= num_col_df1 + num_col_df2 - 1: # merging deletes (possibly many) duplicated \"join-key\" columns in df2, keep and do not change order columns in df1.\n raise ValueError('Format of merged result is too complicated.')\n else:\n raise ValueError('Undefined format of merged result.')\n\n return np.concatenate((col_df1, col_df2)).astype(int).tolist() # 1D numpy array is column vector, so concatenate by axis=0.", "def _get_shared_rows(Xs: List[Tensor]) -> Tuple[Tensor, List[Tensor]]:\n idcs_shared = []\n Xs_sorted = sorted(Xs, key=len)\n X_shared = Xs_sorted[0].clone()\n for X in Xs_sorted[1:]:\n X_shared = X_shared[(X_shared == X.unsqueeze(-2)).all(dim=-1).any(dim=-2)]\n # get indices\n for X in Xs:\n same = (X_shared == X.unsqueeze(-2)).all(dim=-1).any(dim=-1)\n idcs_shared.append(torch.arange(same.shape[-1], device=X_shared.device)[same])\n return X_shared, idcs_shared", "def match_data(mjd1, mjd2):\n # Edge case\n if mjd1.empty or mjd2.empty:\n return None\n \n # Get mjds from dataframes\n mjd1 = np.array(mjd1).reshape(-1,1) # column vector\n mjd2 = np.array(mjd2).reshape(1,-1) # row vector\n \n # Take difference of mjd1 with mjd2\n diffs = np.abs(mjd1 - mjd2)\n # Find smallest difference for each original mjd\n idxs = np.argmin(diffs, axis=1)\n \n # Return indices of matches in mjd2\n return idxs", "def get_valid_idx(mask_list):\n num_cores = multiprocessing.cpu_count()\n data = Parallel(n_jobs=num_cores)(delayed(open_img)(i) for i in mask_list)\n return data", "def check_index_consistency(self):\n dfs = [self._y, self._X_extra, self._X_extra_base, self._X_extra_unenc,\n self._X_select, self._X_select_base, self._X_select_unenc]\n\n indexes = [df.index for df in dfs if df is not None]\n\n for i in range(len(indexes)-1):\n idx1 = indexes[i]\n idx2 = indexes[i+1]\n assert idx1.equals(idx2)", "def find_inds(a, b, unique=False):\n return np.where(np.isin(b, a, assume_unique=unique))", "def get_index(struc, species_1, species_2):\n x = struc.as_dict()\n y = x['sites']\n c1 = np.array([])\n c2 = np.array([])\n for i in range(0, len(y)):\n if y[i]['label'] == species_1:\n c1 = np.append(c1, i)\n elif y[i]['label'] == species_2:\n c2 = np.append(c2, i)\n c1 = c1.astype(int)\n c2 = c2.astype(int)\n return c1, c2", "def extract_merged_dataframe(df, num_col_df1, num_col_df2, col_df1=[], col_df2=[], joinkey_df1=[], joinkey_df2=[]):\n\n return df.iloc[:, get_merged_column_index(num_col_df=df.shape[1], num_col_df1=num_col_df1, num_col_df2=num_col_df2, col_df1=col_df1, col_df2=col_df2, joinkey_df1=joinkey_df1, joinkey_df2=joinkey_df2)]", "def match_indices(shortened_list, primary_list):\n matched_indices = [i for i,x in enumerate(primary_list) if x in shortened_list]\n return matched_indices", "def _get_index(self, source, target):\r\n return [source.index(u) for u in target]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the list of names of columns that need to be formatted. By default, formatting is applied to parameter values. If inference values need to be displayed, adds confidence intervals or standard errors to the list.
def _get_cols_to_format(show_inference, confidence_intervals):
    cols = ["value"]
    if show_inference:
        if confidence_intervals:
            cols += ["ci_lower", "ci_upper"]
        else:
            cols.append("standard_error")
    return cols
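The function is pure Python, so its three branches can be checked directly; the asserts below are illustrative:

```python
def _get_cols_to_format(show_inference, confidence_intervals):
    cols = ["value"]
    if show_inference:
        if confidence_intervals:
            cols += ["ci_lower", "ci_upper"]
        else:
            cols.append("standard_error")
    return cols

assert _get_cols_to_format(False, False) == ["value"]
assert _get_cols_to_format(True, True) == ["value", "ci_lower", "ci_upper"]
assert _get_cols_to_format(True, False) == ["value", "standard_error"]
```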
[ "def colnames(self):\n return list(self)", "def _colNames(self):\n self.mjdCol = 'expMJD'\n self.fieldIdCol = 'fieldID'\n self.raCol = 'fieldRA'\n self.decCol = 'fieldDec'\n self.propIdCol = 'propID'\n self.propConfCol = 'propConf'\n self.propNameCol = 'propName' #(propname == proptype)\n # For config parsing.\n self.versionCol = 'version'\n self.sessionDateCol = 'sessionDate'\n self.runCommentCol = 'runComment'", "def _output_field_columns(self):\n return sql.SQL(', ').join(map(sql.Identifier, self._output_field_names))", "def get_column_names(self):\r\n return [column.key for column in self.table.columns]", "def what_columns(table):\n print [c.name for c in table.c]", "def get_column_names(self):\n columns = list(self.table_content.keys())\n return columns", "def _GetColumnHeaders(self):\n return [\n \"account_name\",\n \"transaction_date\",\n \"transaction_description\",\n \"transaction_amount\",\n \"category\",\n \"display_name\"\n ]", "def create_forecasts_column_description(self):\n column_descriptions = METRICS_COLUMNS_DESCRIPTIONS\n confidence_interval = self._retrieve_confidence_interval()\n for column in self.forecasts_df.columns:\n if \"forecast_lower_\" in column:\n column_descriptions[\n column\n ] = f\"Lower bound of the {confidence_interval}% forecasts confidence interval.\"\n elif \"forecast_upper_\" in column:\n column_descriptions[\n column\n ] = f\"Upper bound of the {confidence_interval}% forecasts confidence interval.\"\n elif \"forecast_\" in column:\n column_descriptions[column] = \"Median of probabilistic forecasts\"\n return column_descriptions", "def col_titles_display(self):\r\n return self.display['col_titles']", "def get_display_columns(self):\n\n # Find the first input field in the dynamic data area after 'Interval Date =>' which is unique and appears\n # across all displays\n try:\n field_found = self.get_first_field(text_before_input_field='Time =>')\n except ValueError:\n field_found = self.get_first_field(text_before_input_field='Time ==>')\n\n # Set initial line, pos, and length for both column names and dash rows on the display\n self.col_name_line = field_found.row - 2\n col_dash_line = field_found.row - 1\n col_pos = field_found.col\n # adjusted_screen_length = self.screenLen - field_found.col\n adjusted_screen_length = self.screenLen - 1\n\n # Get the page of column names and dashes.\n col_name_str = self.ptg2_em.string_get(self.col_name_line, col_pos, adjusted_screen_length)\n col_len_str = self.ptg2_em.string_get(col_dash_line, col_pos, adjusted_screen_length)\n\n return col_name_str, col_len_str", "def _generate_insert_columns_string(cls):\n\n return ', '.join(cls.COLUMNS)", "def columns(self, category):\n\n if category == \"risk\":\n return [\"Date\", \"Title\", \"Severe\", \"Fatality\", \"Design\", \"Sample\", \"Sampling Method\", \"Matches\"]\n\n return [\"Date\", \"Title\", \"Design\", \"Sample\", \"Sampling Method\", \"Matches\"]", "def table_format(self, table):\r\n if not table:\r\n return [[]]\r\n\r\n extra_space = 1\r\n max_widths = [max([len(str(val)) for val in col]) for col in table]\r\n ftable = []\r\n for irow in range(len(table[0])):\r\n ftable.append([str(col[irow]).ljust(max_widths[icol]) + \" \" * extra_space\r\n for icol, col in enumerate(table)])\r\n return ftable", "def Columns():\n cursor = connection.cursor()\n table = 'patient'\n return render_template(\n 'columns.html',\n title='Columns',\n message='All column names.',\n col = cursor.execute(\"SELECT Column_Name FROM INFORMATION_SCHEMA.Columns WHERE 
TABLE_NAME=?\",table)\n )\n cursor.close()", "def get_well_columns(self) -> List[List[str]]:\n return self._definition.ordering", "def makeFormat(self):\n if not self.columns:\n #log.error(\"Table columns not set\")\n #return\n raise TableError(\"Table columns not set.\")\n out = \"%%-%ss\" % (self.columns[0]-1)\n for i in self.columns[1:-1]:\n out += \" | %%-%ss\" % (i-2)\n if len(self.columns)>1:\n out += \" | %%-%ss\" % (self.columns[-1]-1)\n self.format_str = out", "def get_col_read_statements(self):\n s=[]\n for i,f in enumerate(self.fields):\n s.append(f.get_read_statement(i))\n\n return '\\n '.join(s)", "def print_columns(self):\r\n for i in range(len(self.columns)):\r\n print(\"{0:02d}:\".format(i),self.columns[i])", "def getColNames(self, dbType='rawDb', tableName=''):\n\n if len(tableName) == 0:\n colsAndTypes = self.getColNamesAndTypes(dbType=dbType)\n else:\n colsAndTypes = self.getColNamesAndTypes(\n dbType=dbType, tableName=tableName)\n if not colsAndTypes:\n return None\n cols = []\n for tup in colsAndTypes:\n cols.append(tup[0])\n return cols" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value of show_col_groups to False or True given column_groups. Update the default None to True if column_groups is not None; set it to False otherwise.
def _update_show_col_groups(show_col_groups, column_groups):
    if show_col_groups is None:
        if column_groups is not None:
            show_col_groups = True
        else:
            show_col_groups = False
    return show_col_groups
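A small truth-table check of the default-resolution logic above; the group labels are made up:

```python
def _update_show_col_groups(show_col_groups, column_groups):
    if show_col_groups is None:
        if column_groups is not None:
            show_col_groups = True
        else:
            show_col_groups = False
    return show_col_groups

assert _update_show_col_groups(None, ["g1", "g1", "g2"]) is True  # default + groups
assert _update_show_col_groups(None, None) is False               # default + no groups
assert _update_show_col_groups(False, ["g1"]) is False            # explicit value wins
```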
[ "def _customize_col_groups(default_col_groups, custom_col_groups):\n if custom_col_groups:\n if not default_col_groups:\n if not isinstance(custom_col_groups, list):\n raise ValueError(\n \"\"\"With unique model names, multiple models can't be grouped\n under common group name. Provide list of unique group names instead,\n if you wish to add column level.\"\"\"\n )\n col_groups = custom_col_groups\n else:\n if isinstance(custom_col_groups, list):\n col_groups = custom_col_groups\n elif isinstance(custom_col_groups, dict):\n col_groups = (\n pd.Series(default_col_groups).replace(custom_col_groups).to_list()\n )\n else:\n raise TypeError(\n f\"\"\"Invalid type for custom_col_groups. Can be either list\n or dictionary, or NoneType. Not: {type(col_groups)}.\"\"\"\n )\n else:\n col_groups = default_col_groups\n return col_groups", "def _toggle_experiment_group_collapsable(last_show_click, last_hide_click):\n last_show_click = last_show_click if last_show_click else 0\n last_hide_click = last_hide_click if last_hide_click else 0\n\n # \"show\" is clicked: open collabsable, hide \"show\" button & un-hide \"hide\" button\n if int(last_show_click) > int(last_hide_click):\n return True, True, False\n # \"hide\" is clicked: close collabsable, un-hide \"show\" button & hide \"hide\" button\n elif int(last_hide_click) > int(last_show_click):\n return False, False, True\n # nothing yet is clicked: return default states\n else:\n return False, False, True", "def _hide_output(ctx, groups):\n groups = set(groups or [])\n if not groups.issubset(constants.VALID_FABRIC_GROUPS):\n ctx.task.abort('`hide_output` must be a subset of {0} (Provided: {1})'\n .format(', '.join(constants.VALID_FABRIC_GROUPS), ', '.join(groups)))\n return fabric.api.hide(*groups)", "def make_table_columns_visible(self, table_ui=None):\n nbr_col_h1 = table_ui.columnCount()\n for _col in np.arange(nbr_col_h1):\n table_ui.setColumnHidden(_col, False)", "def _toggle_columns(self, view: View, defx: Defx,\n context: Context) -> None:\n columns = (context.args[0] if context.args else '').split(':')\n if not columns:\n return\n current_columns = [x.name for x in view._columns]\n if columns == current_columns:\n # Use default columns\n columns = context.columns.split(':')\n view._init_columns(columns)", "def _setup_columns(self):\n if self.has_checkboxes:\n toggle_cell = Gtk.CellRendererToggle()\n toggle_cell.connect('toggled', self.on_toggle,\n self.model.data_source.selected_column_idx)\n col = Gtk.TreeViewColumn(\n '', toggle_cell,\n active=self.model.data_source.selected_column_idx)\n\n check_btn = Gtk.CheckButton()\n col.set_widget(check_btn)\n check_btn.show()\n\n self.check_btn_toggled_id = check_btn.connect(\n \"toggled\", self.on_select_all_column_clicked)\n\n # Mimic toggle on checkbutton since it won't receive the click.\n # This will work when clicking directly on the checkbutton or on\n # the header button itself.\n col.connect(\n 'clicked',\n lambda tvc: check_btn.set_active(not check_btn.get_active()))\n\n self.check_btn_toggle_all = check_btn\n self.append_column(col)\n\n # FIXME: We should find a better way for hiding this columns.\n # A way to specify the visibility on the columns config would be nice.\n dont_display = set([self.model.data_source.SELECTED_COLUMN])\n if not self.model.data_source.display_all:\n dont_display.add(self.model.data_source.ID_COLUMN)\n dont_display.add(self.model.data_source.PARENT_ID_COLUMN)\n if not self.model.active_params.get('flat', False):\n 
dont_display.add(self.model.data_source.FLAT_COLUMN)\n\n samples = list(itertools.islice(\n (r.data for r in self.model.iter_rows()), self.SAMPLE_SIZE))\n for column_index, column in enumerate(self.model.columns):\n item = column['name']\n display = item in self.model.display_columns\n if display and column['name'] not in dont_display:\n item_display = column['display']\n if column['transform'] in ['boolean', 'image']:\n renderer = Gtk.CellRendererPixbuf()\n cell_renderer_kwargs = {'pixbuf': column_index}\n else:\n renderer = Gtk.CellRendererText()\n renderer.set_property('ellipsize', Pango.EllipsizeMode.END)\n if column['type'] in (int, long, float):\n renderer.set_property('xalign', 1)\n cell_renderer_kwargs = {'text': column_index}\n lbl = '%s' % (item_display.replace('_', '__'),)\n col = Gtk.TreeViewColumn(lbl, renderer, **cell_renderer_kwargs)\n col.connect('clicked', self.on_tvcol_clicked, item)\n col.set_resizable(True)\n # Set the minimum width for the column based on the width\n # of the label and some padding\n width = self._get_pango_string_width(lbl) + 14\n col.set_fixed_width(\n self._get_best_column_width(column_index, samples))\n col.set_sizing(Gtk.TreeViewColumnSizing.FIXED)\n col.set_expand(column['expand'])\n if item == self.active_sort_column:\n # When the column is expanded, leave a little more\n # space for the sort indicator\n width += self._get_pango_string_width(\n u\" \\u25BC\".encode('utf-8'))\n col.set_sort_indicator(True)\n col.set_sort_order(self.active_sort_column_order)\n col.set_min_width(width)\n self.append_column(col)\n\n self.set_headers_clickable(True)\n self._update_toggle_check_btn_activity()", "def supports_gradebook_column_admin(self):\n return # boolean", "def SetVisible(self, visible):\n if self._visible != visible:\n self._visible = visible\n for action in self._actions:\n action._SetGroupVisible(visible)", "def set_display_only(self, display_only):\n self.display_only = display_only", "def showGroupMenu( self ):\n group_active = self.isGroupingActive()\n group_by = self.groupBy()\n \n menu = XMenu(self)\n menu.setTitle('Grouping Options')\n menu.setShowTitle(True)\n menu.addAction('Edit Advanced Grouping')\n \n menu.addSeparator()\n \n action = menu.addAction('No Grouping')\n action.setCheckable(True)\n action.setChecked(not group_active)\n \n action = menu.addAction('Advanced')\n action.setCheckable(True)\n action.setChecked(group_by == self.GroupByAdvancedKey and group_active)\n if ( group_by == self.GroupByAdvancedKey ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n menu.addSeparator()\n \n # add dynamic options from the table schema\n tableType = self.tableType()\n if ( tableType ):\n columns = tableType.schema().columns()\n columns.sort(key = lambda x: x.displayName())\n for column in columns:\n action = menu.addAction(column.displayName())\n action.setCheckable(True)\n action.setChecked(group_by == column.displayName() and\n group_active)\n \n if ( column.displayName() == group_by ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n point = QPoint(0, self.uiGroupOptionsBTN.height())\n action = menu.exec_(self.uiGroupOptionsBTN.mapToGlobal(point))\n \n if ( not action ):\n return\n elif ( action.text() == 'Edit Advanced Grouping' ):\n print 'edit advanced grouping options'\n elif ( action.text() == 'No Grouping' ):\n self.setGroupingActive(False)\n \n elif ( action.text() == 'Advanced' ):\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(self.GroupByAdvancedKey)\n 
self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()\n \n else:\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(str(action.text()))\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()", "def make_all_columns_visible(self):\n self.make_table_columns_visible(table_ui=self.ui.h1_table)\n self.make_table_columns_visible(table_ui=self.ui.h2_table)\n self.make_table_columns_visible(table_ui=self.ui.h3_table)", "def _set_display_options(length, cols=True):\n if cols:\n pd.set_option(\"display.max_columns\", length)\n else:\n pd.set_option(\"display.max_rows\", length)", "def _set_isDataPanelVisible(self, *args) -> \"bool\" :\n return _core.Data__set_isDataPanelVisible(self, *args)", "def _set_isExpanded(self, *args) -> \"bool\" :\n return _core.GroupCommandInput__set_isExpanded(self, *args)", "def set_display_axis(self, axis):\r\n if axis == \"yx\":\r\n self.slice_xy = True\r\n self.slice_xz = False\r\n self.slice_yz = False\r\n elif axis == \"yz\":\r\n self.slice_xy = False\r\n self.slice_xz = True\r\n self.slice_yz = False\r\n elif axis == \"xz\":\r\n self.slice_xy = False\r\n self.slice_xz = False\r\n self.slice_yz = True\r\n self.display_slice(0)", "def set_visibility(group, status=True):\n for tree in group:\n tree.visible = True", "def owner_has_distribution_groups(self, owner_has_distribution_groups):\n\n self._owner_has_distribution_groups = owner_has_distribution_groups", "def _set_hasGrid(self, *args) -> \"bool\" :\n return _core.TableCommandInput__set_hasGrid(self, *args)", "def setup_column_prefs( self ):\n\n\t\tpass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that identically named models are adjacent.
def _check_order_of_model_names(model_names):
    group_to_col_index = _create_group_to_col_position(model_names)
    for positions in group_to_col_index.values():
        if positions != list(range(positions[0], positions[-1] + 1)):
            raise ValueError(
                "If there are repetitions in model_names, models with the "
                f"same name need to be adjacent. You provided: {model_names}"
            )
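A runnable sketch of the adjacency check. It assumes the `_create_group_to_col_position` helper that the function calls (shown as its own row further below); the helper is simplified here by dropping its None branch, and the model-name lists are invented:

```python
def _create_group_to_col_position(column_groups):
    # maps each name to the column positions where it occurs
    group_to_col_index = {group: [] for group in set(column_groups)}
    for i, group in enumerate(column_groups):
        group_to_col_index[group].append(i)
    return group_to_col_index

def _check_order_of_model_names(model_names):
    group_to_col_index = _create_group_to_col_position(model_names)
    for positions in group_to_col_index.values():
        if positions != list(range(positions[0], positions[-1] + 1)):
            raise ValueError(
                "If there are repetitions in model_names, models with the "
                f"same name need to be adjacent. You provided: {model_names}"
            )

_check_order_of_model_names(["ols", "ols", "logit"])  # adjacent duplicates: ok
try:
    _check_order_of_model_names(["ols", "logit", "ols"])  # split duplicates
except ValueError as err:
    print(err)
```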
[ "def _same_namedtuples(nest1, nest2):\n if nest1._fields != nest2._fields:\n return False\n if nest1.__class__.__name__ != nest2.__class__.__name__:\n return False\n return True", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return \\\n len({node.get_name() for node in self.get_left_nodeset().union(self.get_right_nodeset())}) \\\n != len(self.get_left_nodeset()) + len(self.get_right_nodeset())", "def equals(self, other: object) -> bool:\n return type(other) == ForwardModel and self.id == other.id and self.name == other.display_name \\\n and self.description == other.description and self.model_data_type == other.input_type \\\n and self.input_bands == other.input_bands and self.input_band_indices == other.input_band_indices", "def test_equivalent(self):\n for index1, element1 in enumerate(element_list[0:10]):\n for index2, element2 in enumerate(element_list[0:10]):\n atom1 = Atom(element=element1, radical_electrons=1, charge=0, label='*1', lone_pairs=0)\n atom2 = Atom(element=element2, radical_electrons=1, charge=0, label='*1', lone_pairs=0)\n if index1 == index2:\n self.assertTrue(atom1.equivalent(atom2))\n self.assertTrue(atom2.equivalent(atom1))\n else:\n self.assertFalse(atom1.equivalent(atom2))\n self.assertFalse(atom2.equivalent(atom1))", "def same_label(self):\n return len(self.labels_list)<=1", "def _are_lights_sequential(a, b):\n return a.node == b.node and a.index + 1 == b.index", "def compare_two_model_index(index_1, index_2):\n return (\n (index_1.row() == index_2.row())\n and (index_1.column() == index_2.column())\n and (index_1.parent() == index_2.parent())\n )", "def test_equivalent(self):\n for order1 in self.orderList:\n for order2 in self.orderList:\n bond1 = Bond(None, None, order=order1)\n bond2 = Bond(None, None, order=order2)\n if order1 == order2:\n self.assertTrue(bond1.equivalent(bond2))\n self.assertTrue(bond2.equivalent(bond1))\n else:\n self.assertFalse(bond1.equivalent(bond2))\n self.assertFalse(bond2.equivalent(bond1))", "def test_comparison_on_different_names(self):\n a = objects.SplitKey(name=\"Test Split Key\")\n b = objects.SplitKey(name=\"Split Key Test\")\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def is_id_duplicate(self) -> bool:\n for step in self.job.steps:\n if (step.id == self.id) and (step != self):\n return True\n return False", "def check_if_flower_model(model, log):\n activities = Utility.get_activities(log)\n source_places=set()\n target_places=set()\n for transition in model.transitions:\n if transition.name or transition.label in activities:\n for arc in transition.in_arcs:\n source_places.add(arc.source)\n for arc in transition.out_arcs:\n target_places.add(arc.target)\n if source_places==target_places:\n return True\n else:\n return False", "def test_same_name_same_id(self):\n c = Neuron(name=\"boots\")\n c1 = Neuron(name=\"boots\")\n self.assertEqual(c.identifier, c1.identifier)", "def _same_atom_different_altloc(atom1, atom2):\n\n label1, label2 = [i.fetch_labels() for i in [atom1, atom2]]\n name1, name2 = atom1.name.strip(), atom2.name.strip()\n chain1, chain2 = label1.chain_id, label2.chain_id\n res1, res2 = label1.resid(), label2.resid()\n return name1 == name2 and chain1 == chain2 and res1 == res2", "def can_follow(self, prev):\n # TODO NOTE unused_indices is not compared here, in order to allow program fragment repetition\n return self.init_reg_refs == prev.reg_refs", "def computeEquivalence(self):\n\t\tdone = 
{}\n\t\tself.groups = []\n\t\tfor s1 in self.states.values():\n\t\t\tif s1.name not in done:\n\t\t\t\tnewGroup = [s1]\n\t\t\t\tdone[s1.name] = True\n\t\t\t\tfor s2 in self.states.values():\n\t\t\t\t\tif s2.name not in done and s1.isEquivalent(s2):\n\t\t\t\t\t\tnewGroup.append(s2)\n\t\t\t\t\t\tdone[s2.name] = True\n\t\t\t\tself.groups.append(newGroup)\n\n\t\tfor i in range(len(self.groups)):\n\t\t\tself.groups[i] = sorted(self.groups[i],\n\t\t\t\t\t\t\t\t\tkey=operator.attrgetter(\"name\"))\n\n\t\tself.groups = sorted(self.groups,key=lambda x: x[0].name)", "def check_names(collection, message=\"Two objects share name '{}'.\"):\n nc = collision([x.name for x in collection])\n if nc:\n raise ValueError(message.format(nc))\n return True", "def check_name(self, cr, uid, ids, context=None):\n \n for rec in self.browse(cr, uid, ids, context=context):\n idss = self.search(cr,uid, [('name', '=', rec.name),('id','!=',rec.id)])\n if idss:\n raise osv.except_osv(_('ERROR'), _('This place name is already exisit for the company %s') % (rec.company_id.name))\n return True", "def has_same_bonds(self, other_atoms):\n if len(self) != len(other_atoms):\n return False\n if len(self.bonds) != len(other_atoms.bonds):\n return False\n for (i, atom) in enumerate(self.atoms):\n other = other_atoms.atoms[i]\n # print(\"{}={}\".format(i, atom.index))\n atom_neighbors = {n.index for n in atom.neighbors}\n other_neighbors = {n.index for n in other.neighbors}\n # print(atom_neighbors, other_neighbors)\n if atom_neighbors == other_neighbors:\n continue\n else:\n return False\n return True", "def isEquivalent(self, other: ghidra.program.model.symbol.ExternalLocation) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change default (inferred) column group titles using custom column groups.
def _customize_col_groups(default_col_groups, custom_col_groups):
    if custom_col_groups:
        if not default_col_groups:
            if not isinstance(custom_col_groups, list):
                raise ValueError(
                    "With unique model names, multiple models can't be grouped "
                    "under a common group name. Provide a list of unique group "
                    "names instead, if you wish to add a column level."
                )
            col_groups = custom_col_groups
        else:
            if isinstance(custom_col_groups, list):
                col_groups = custom_col_groups
            elif isinstance(custom_col_groups, dict):
                col_groups = (
                    pd.Series(default_col_groups).replace(custom_col_groups).to_list()
                )
            else:
                raise TypeError(
                    "Invalid type for custom_col_groups. Can be either list "
                    f"or dictionary, or NoneType. Not: {type(custom_col_groups)}."
                )
    else:
        col_groups = default_col_groups
    return col_groups
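The core of the dict branch is a single `pd.Series.replace` call; the sketch below isolates it with invented group titles:

```python
import pandas as pd

default_col_groups = ["ols", "ols", "logit"]  # inferred from repeated model names
custom_col_groups = {"ols": "Linear", "logit": "Binary"}

col_groups = pd.Series(default_col_groups).replace(custom_col_groups).to_list()
print(col_groups)  # ['Linear', 'Linear', 'Binary']
```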
[ "def _update_show_col_groups(show_col_groups, column_groups):\n if show_col_groups is None:\n if column_groups is not None:\n show_col_groups = True\n else:\n show_col_groups = False\n return show_col_groups", "def group_names_for_display(self):\n return self.demographic_group_name, \"non-\" + self.demographic_group_name", "def test_custom_span_group_name(self):\n\n # let's make sure that this pipe has been initialized\n # At least for MacOS and Linux which are currently supported...\n\n\n # allow default QuickUMLS (very small sample data) to be loaded\n nlp = spacy.blank(\"en\")\n\n custom_span_group_name = \"my_own_span_group\"\n\n nlp.add_pipe(\"medspacy_quickumls\", config={\"threshold\": 0.7,\n \"result_type\": \"group\",\n \"span_group_name\": custom_span_group_name})\n\n text = \"Decreased dipalmitoyllecithin also branching glycosyltransferase and dipalmitoyl phosphatidylcholine\"\n\n doc = nlp(text)\n\n assert len(doc.ents) == 0\n\n assert MEDSPACY_DEFAULT_SPAN_GROUP_NAME not in doc.spans or len(doc.spans[MEDSPACY_DEFAULT_SPAN_GROUP_NAME]) == 0\n\n assert len(doc.spans[custom_span_group_name]) >= 1", "def setGroupBy( self, groupBy ):\n if ( type(groupBy) in (list, tuple) ):\n groupBy = ','.join(map(str, groupBy))\n \n self.uiGroupingTXT.setText(groupBy)", "def group_label(self, group_label):\n\n self._group_label = group_label", "def change_group_name(self, user_id: int, group_name: str):\n self.cursor.execute(f\"UPDATE public.users SET group_name = '{group_name}' WHERE user_id = {user_id}\")\n self.conn.commit()", "def update_group(attr, old, new):\n variable_select.options = nix(new, all_options, var_options)\n update_table()", "def option_group_name(self) -> str:\n ...", "def group_template_name(self) -> str:\n return f\"group_heading.{self.template_suffix}\"", "def add_group_columns(df: pd.DataFrame, group_type, len_BG=8, positive_group=\"nFocal\"):\n # Select the bg_col based on the group_type. These columns must have been added to the df:\n if group_type == \"block_group\":\n bg_col = \"BlockGroup\"\n df[\"BlockGroup\"] = df.index.to_series().astype(str).str.slice(stop=len_BG+1)\n elif group_type == \"nbhd\":\n bg_col = \"Neighborhood\"\n else:\n print(\"Grouping method must be nbhd or block_group. 
No grouping defined.\")\n return df\n\n # Create an aggregated dataframe with counts per block group:\n agg_df = df[[bg_col, \"n\", \"nFocal\", \"nFRL\", \"nAALPI\", \"nBoth\"]].groupby(bg_col).sum()\n agg_df = agg_df.rename(columns={\"n\": \"BG_n\",\n \"nFocal\": \"BG_nFocal\",\n \"nFRL\": \"BG_nFRL\",\n \"nAALPI\": \"BG_nAALPI\",\n \"nBoth\": \"BG_nBoth\"})\n # Merge the aggregated df on the main df via the bg_column:\n df[\"geoid\"] = df.index\n extended_df = df.merge(agg_df, on=bg_col).set_index(\"geoid\")\n\n extended_df[\"BG_nOther\"] = extended_df['BG_n'] - extended_df[\"BG_\" + positive_group]\n\n extended_df['BG_pctFRL'] = extended_df['BG_nFRL'] / extended_df['BG_n']\n extended_df['BG_pctAALPI'] = extended_df['BG_nAALPI'] / extended_df['BG_n']\n extended_df['BG_pctFocal'] = extended_df['BG_nFocal'] / extended_df['BG_n']\n\n extended_df['BG_pctBoth'] = extended_df['BG_nBoth'] / extended_df['BG_n']\n extended_df['BG_pctBothUnion'] = extended_df['BG_nBoth'] / extended_df['BG_nFocal'] #union\n\n return extended_df", "def custom_group(self, obj):\n return ','.join([g.name for g in obj.groups.all()]) if obj.groups.count() else ''", "def intFieldGrp(groupName, docTag=\"string\", height=int, columnWidth4=int, extraLabel=\"string\", enable3=bool, popupMenuArray=bool, numberOfPopupMenus=bool, noBackground=bool, defineTemplate=\"string\", width=int, label=\"string\", highlightColor=float, value=int, dragCallback=\"string\", columnOffset2=int, parent=\"string\", value2=int, annotation=\"string\", columnAlign5=\"string\", columnOffset5=int, preventOverride=bool, columnAlign=int, columnWidth6=int, adjustableColumn4=int, rowAttach=int, columnOffset3=int, columnAlign4=\"string\", adjustableColumn5=int, dragCommand=\"string\", exists=bool, columnAttach4=\"string\", value3=int, numberOfFields=int, value1=int, adjustableColumn2=int, visible=bool, enable=bool, adjustableColumn6=int, enableBackground=bool, visibleChangeCommand=\"string\", adjustableColumn=int, columnWidth3=int, columnAlign2=\"string\", useTemplate=\"string\", columnAlign6=\"string\", columnWidth1=int, columnWidth2=int, columnAttach3=\"string\", fullPathName=bool, enable2=bool, dropCallback=\"string\", columnAlign3=\"string\", columnAttach=int, adjustableColumn3=int, columnAttach5=\"string\", backgroundColor=float, columnWidth5=int, enable4=bool, columnWidth=int, manage=bool, columnOffset4=int, enable1=bool, changeCommand=\"string\", columnAttach2=\"string\", value4=int, columnAttach6=\"string\", isObscured=bool, columnOffset6=int):\n pass", "def setKerningGroupConversionRenameMaps(self, maps):\n if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:\n return # XXX raise an error here\n # flip the dictionaries\n remap = {}\n for side in (\"side1\", \"side2\"):\n for writeName, dataName in list(maps[side].items()):\n remap[dataName] = writeName\n self._downConversionKerningData = dict(groupRenameMap=remap)", "def make_groups(df, gb, groups):\n # first convert all groups to lists for safety\n groups_list = [[i] if type(i) != list else i for i in groups]\n # why does this line throw a warning?\n df[\"group\"] = \"\"\n for g in groups_list:\n group_name = \"+\".join(g)\n index = itemgetter(*g)(gb.groups)\n # either a tuple of indices or a single index\n if type(index) == tuple:\n index = reduce(lambda a, b: a.union(b), index)\n df.loc[index, \"group\"] = group_name\n return df", "def insert_case_number_in_grouped_df(self):\n self.build_grouped_dataframe()\n if not ws_constants.DEFAULT_CASE_INDEX_KEY in self.dataframe:\n 
self.dataframe[ws_constants.DEFAULT_CASE_INDEX_KEY] = self.grouped_dataframe.ngroup()", "def group_title(path):\n\n def filter_group(group):\n for suffix in [\"_patch_parameter\", \"_update_parameter\", \"_parameter\"]:\n if group.endswith(suffix):\n group = group[:0 - len(suffix)]\n return group\n\n group_path = path.split('.')\n group_path = list(map(filter_group, group_path))\n title = ': '.join(group_path)\n for each in group_path:\n title = title.replace(each, \" \".join([n.title() for n in each.split('_')]), 1)\n return title", "def attrFieldGrp(groupName, docTag=\"string\", extraButton=bool, step=float, columnWidth4=int, extraLabel=\"string\", popupMenuArray=bool, numberOfPopupMenus=bool, noBackground=bool, defineTemplate=\"string\", width=int, label=\"string\", highlightColor=float, height=int, dragCallback=\"string\", columnOffset2=int, parent=\"string\", annotation=\"string\", columnAlign5=\"string\", columnOffset5=int, preventOverride=bool, columnAlign=int, columnWidth6=int, adjustableColumn4=int, hideMapButton=bool, rowAttach=int, columnOffset3=int, columnAlign4=\"string\", adjustableColumn5=int, forceAddMapButton=bool, exists=bool, columnAttach4=\"string\", extraButtonCommand=\"string\", numberOfFields=int, extraButtonIcon=\"string\", adjustableColumn2=int, visible=bool, enable=bool, adjustableColumn6=int, enableBackground=bool, visibleChangeCommand=\"string\", adjustableColumn=int, columnWidth3=int, columnAlign2=\"string\", useTemplate=\"string\", columnAlign6=\"string\", columnWidth1=int, columnWidth2=int, columnAttach3=\"string\", precision=int, fullPathName=bool, attribute=\"string\", dropCallback=\"string\", columnAlign3=\"string\", columnAttach=int, adjustableColumn3=int, columnAttach5=\"string\", backgroundColor=float, columnWidth5=int, minValue=float, columnWidth=int, maxValue=float, manage=bool, columnOffset4=int, changeCommand=\"string\", columnAttach2=\"string\", columnAttach6=\"string\", isObscured=bool, columnOffset6=int):\n pass", "def columns(self):\n columns = super(SubmittedProposalListingTab, self).columns\n for col in columns:\n if col.get('column') == 'title':\n col['transform'] = proposal_title_link\n\n return columns", "def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', 'E']\n for col in med_cols:\n cell = f'{col}{self.title_final_row + 1}'\n self.format_cell_as_header(cell)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change default (inferred) column names using custom column names.
def _customize_col_names(default_col_names, custom_col_names):
    if not custom_col_names:
        col_names = default_col_names
    elif isinstance(custom_col_names, dict):
        col_names = list(pd.Series(default_col_names).replace(custom_col_names))
    elif isinstance(custom_col_names, list):
        if not len(custom_col_names) == len(default_col_names):
            raise ValueError(
                "If provided as a list, custom_col_names should have the same "
                "length as default_col_names. Length of custom_col_names "
                f"{len(custom_col_names)} != length of default_col_names "
                f"{len(default_col_names)}."
            )
        elif any(isinstance(i, list) for i in custom_col_names):
            raise ValueError("custom_col_names cannot be a nested list")
        col_names = custom_col_names
    else:
        raise TypeError(
            "Invalid type for custom_col_names. Can be either list "
            f"or dictionary, or NoneType. Not: {type(custom_col_names)}."
        )
    return col_names
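A short sketch of the two accepted container types, with invented column names; the list branch simply requires a one-to-one match in length:

```python
import pandas as pd

default_col_names = ["(1)", "(2)", "(3)"]

# dict: rename selected columns via a mapping
print(list(pd.Series(default_col_names).replace({"(2)": "OLS"})))
# ['(1)', 'OLS', '(3)']

# list: must have the same length as the defaults
custom = ["A", "B", "C"]
assert len(custom) == len(default_col_names)
```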
[ "def change_column_names(data, new_names):\n old_names = data.columns\n if isinstance(new_names, list):\n mapping = dict(zip(old_names, new_names))\n else:\n mapping = new_names\n\n transformed_data = data.select([col(c).alias(mapping.get(c, c)) for c in data.columns])\n return transformed_data", "def rename_columns(self):\r\n self.columns = [self._date, self._net_purchase, self._gross_sale, self._tax, self._margin]\r\n self.all_data.columns = self.columns", "def __rename_used_cols(df, train_cols: list):\n return df.rename(columns={col: C.USED_COL_FORMAT.format(col) for col in train_cols})", "def column_default(self, column_data: Dict) -> str:\n if isinstance(column_data.default, str):\n if column_data.type.upper() in datetime_types:\n if \"now\" in column_data.default.lower():\n # todo: need to add other popular PostgreSQL & MySQL functions\n column_data.default = \"func.now()\"\n self.state.add(\"func\")\n elif \"'\" not in column_data.default:\n column_data.default = f\"'{column_data.default}'\"\n else:\n if \"'\" not in column_data.default:\n column_data.default = f\"'{column_data.default}'\"\n else:\n column_data.default = f\"'{str(column_data.default)}'\"\n default_property = st.default.format(default=column_data.default)\n return default_property", "def rename_columns(df_data, new_col):\n df_data.rename(columns=new_col, inplace=True)", "def _colNames(self):\n self.mjdCol = 'expMJD'\n self.fieldIdCol = 'fieldID'\n self.raCol = 'fieldRA'\n self.decCol = 'fieldDec'\n self.propIdCol = 'propID'\n self.propConfCol = 'propConf'\n self.propNameCol = 'propName' #(propname == proptype)\n # For config parsing.\n self.versionCol = 'version'\n self.sessionDateCol = 'sessionDate'\n self.runCommentCol = 'runComment'", "def _customize_col_groups(default_col_groups, custom_col_groups):\n if custom_col_groups:\n if not default_col_groups:\n if not isinstance(custom_col_groups, list):\n raise ValueError(\n \"\"\"With unique model names, multiple models can't be grouped\n under common group name. Provide list of unique group names instead,\n if you wish to add column level.\"\"\"\n )\n col_groups = custom_col_groups\n else:\n if isinstance(custom_col_groups, list):\n col_groups = custom_col_groups\n elif isinstance(custom_col_groups, dict):\n col_groups = (\n pd.Series(default_col_groups).replace(custom_col_groups).to_list()\n )\n else:\n raise TypeError(\n f\"\"\"Invalid type for custom_col_groups. Can be either list\n or dictionary, or NoneType. 
Not: {type(col_groups)}.\"\"\"\n )\n else:\n col_groups = default_col_groups\n return col_groups", "def setColumns( self, names ):\n self.columns = names", "def new_column_name(self) -> str:\n return pulumi.get(self, \"new_column_name\")", "def _standardize_column_names(cls, data: pd.DataFrame):\n return data.rename(cls._get_column_mapping(), axis='columns')[cls.PPI_DATA_COL_LST].reset_index(drop=True)", "def set_source_column(self, source_column_name: str) -> str:\n self.source_column = source_column_name", "def normalize_col_name(self, col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = clean_utf8(col_name)\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if is_relation:\n if new_name.endswith('_id'):\n new_name = new_name[:-3]\n else:\n field_params['db_column'] = col_name\n\n new_name, num_repl = re.subn(r'\\W', '_', new_name)\n if num_repl > 0:\n field_notes.append('Field renamed to remove unsuitable characters.')\n\n if new_name.find('__') >= 0:\n while new_name.find('__') >= 0:\n new_name = new_name.replace('__', '_')\n if col_name.lower().find('__') >= 0:\n # Only add the comment if the double underscore was in the original name\n field_notes.append(\"Field renamed because it contained more than one '_' in a row.\")\n\n if new_name.startswith('_'):\n new_name = 'field%s' % new_name\n field_notes.append(\"Field renamed because it started with '_'.\")\n\n if new_name.endswith('_'):\n new_name = '%sfield' % new_name\n field_notes.append(\"Field renamed because it ended with '_'.\")\n\n if keyword.iskeyword(new_name):\n new_name += '_field'\n field_notes.append('Field renamed because it was a Python reserved word.')\n\n if new_name[0].isdigit():\n new_name = 'number_%s' % new_name\n field_notes.append(\"Field renamed because it wasn't a valid Python identifier.\")\n\n if new_name in used_column_names:\n num = 0\n while '%s_%d' % (new_name, num) in used_column_names:\n num += 1\n new_name = '%s_%d' % (new_name, num)\n field_notes.append('Field renamed because of name conflict.')\n\n if col_name != new_name and field_notes:\n field_params['db_column'] = col_name\n\n return new_name, field_params, field_notes", "def rename_column_(self, original_column_name: str, new_column_name: str):\n self._check_values_type()\n for dataset in self.values():\n dataset.rename_column_(original_column_name=original_column_name, new_column_name=new_column_name)", "def set_target_column(self, target_column_name: str) -> str:\n self.target_column = target_column_name", "def _mapColumnNames(self, oldColumns, newColumns):\n newToOldNameMapping = {}\n oldColumnsRemoved = []\n newColumnsAdded = []\n for ocol in oldColumns:\n if ocol in newColumns:\n newToOldNameMapping[ocol] = ocol\n elif self._renamedColumns.has_key(ocol):\n newToOldNameMapping[self._renamedColumns[ocol]] = ocol\n else:\n oldColumnsRemoved.append(ocol)\n for ncol in newColumns:\n if not ncol in oldColumns:\n if not ncol in self._renamedColumns.values():\n newColumnsAdded.append(ncol)\n return (oldColumnsRemoved, newColumnsAdded, newToOldNameMapping)", "def default_column_width(self, default_column_width):\n\n self._default_column_width = default_column_width", "def change_cols(data, params):\n # Drop bad columns first\n good_cols = [col for col,val in params.items() if (val and col in data.columns)]\n new_data = data[good_cols].copy()\n \n # Re-map column names\n col_mapper = {col: new_col for col,new_col in params.items() if 
isinstance(new_col, str)}\n new_data.rename(columns=col_mapper, inplace=True)\n \n return new_data", "def compile_rename_column(self, blueprint, command, connection):\n # The code is a little complex. It will propably change\n # if we support complete diffs in dbal\n sql = []\n\n schema = connection.get_schema_manager()\n table = self.get_table_prefix() + blueprint.get_table()\n\n column = connection.get_column(table, command.from_)\n\n columns = schema.list_table_columns(table).values()\n indexes = schema.list_table_indexes(table)\n foreign_keys = schema.list_table_foreign_keys(table)\n\n diff = self._get_renamed_diff(blueprint, command, column, schema)\n renamed_columns = diff.renamed_columns\n\n old_column_names = list(map(lambda x: x.get_name(), columns))\n\n # We build the new column names\n new_column_names = []\n for column_name in old_column_names:\n if column_name in renamed_columns:\n new_column_names.append(renamed_columns[column_name].get_name())\n else:\n new_column_names.append(column_name)\n\n # We create a temporary table and insert the data into it\n temp_table = '__temp__' + self.get_table_prefix() + blueprint.get_table()\n sql.append('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'\n % (temp_table, self.columnize(old_column_names), table))\n\n # We drop the current table\n sql += Blueprint(table).drop().to_sql(None, self)\n\n # Building the list a new columns\n new_columns = []\n for column in columns:\n for column_name, changed_column in renamed_columns.items():\n if column_name == column.get_name():\n new_columns.append(changed_column)\n\n # Here we will try to rebuild a new blueprint to create a new table\n # with the original name\n new_blueprint = Blueprint(table)\n new_blueprint.create()\n primary = []\n for column in columns:\n # Mapping the database type to the blueprint type\n type = column.get_type()\n if type == 'smallint':\n type = 'small_integer'\n elif type == 'bigint':\n type = 'big_integer'\n elif type == 'blob':\n type = 'binary'\n\n # If the column is a primary, we will add it to the blueprint later\n if column.get_platform_option('pk'):\n primary.append(column.get_name())\n\n # If the column is not one that's been renamed we reinsert it into the blueprint\n if column.get_name() not in renamed_columns.keys():\n col = getattr(new_blueprint, type)(column.get_name())\n\n # If the column is nullable, we flag it\n if not column.get_notnull():\n col.nullable()\n\n # If the column has a default value, we add it\n if column.get_default() is not None:\n col.default(QueryExpression(column.get_default()))\n\n # Inserting the renamed columns into the blueprint\n for column in new_columns:\n type = column.get_type()\n if type == 'smallint':\n type = 'small_integer'\n elif type == 'bigint':\n type = 'big_integer'\n elif type == 'blob':\n type = 'binary'\n\n col = getattr(new_blueprint, type)(column.get_name())\n if not column.get_notnull():\n col.nullable()\n\n if column.get_default() is not None:\n col.default(QueryExpression(column.get_default()))\n\n # We add the primary keys\n if primary:\n new_blueprint.primary(primary)\n\n # We rebuild the indexes\n for index in indexes:\n index_columns = index['columns']\n new_index_columns = []\n index_name = index['name']\n\n for column_name in index_columns:\n if column_name in renamed_columns:\n new_index_columns.append(renamed_columns[column_name].get_name())\n else:\n new_index_columns.append(column_name)\n\n if index_columns != new_index_columns:\n index_name = None\n\n if index['unique']:\n 
new_blueprint.unique(new_index_columns, index_name)\n else:\n new_blueprint.index(index['columns'], index_name)\n\n for foreign_key in foreign_keys:\n fkey_from = foreign_key['from']\n if fkey_from in renamed_columns:\n fkey_from = renamed_columns[fkey_from].get_name()\n\n new_blueprint.foreign(fkey_from)\\\n .references(foreign_key['to'])\\\n .on(foreign_key['table'])\\\n .on_delete(foreign_key['on_delete'])\\\n .on_update(foreign_key['on_update'])\n\n # We create the table\n sql += new_blueprint.to_sql(None, self)\n\n # We reinsert the data into the new table\n sql.append('INSERT INTO %s (%s) SELECT %s FROM %s'\n % (self.wrap_table(table),\n ', '.join(new_column_names),\n self.columnize(old_column_names),\n self.wrap_table(temp_table)\n ))\n\n # Finally we drop the temporary table\n sql += Blueprint(temp_table).drop().to_sql(None, self)\n\n return sql", "def rename_columns(df):\n df = df.rename(columns={'Sample Name':'Sample','Gene Name': 'Target', 'Condition Name': 'Treatment'})\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get mapping from column groups to column positions.
def _create_group_to_col_position(column_groups):
    if column_groups is not None:
        group_to_col_index = {group: [] for group in list(set(column_groups))}
        for i, group in enumerate(column_groups):
            group_to_col_index[group].append(i)
    else:
        group_to_col_index = None
    return group_to_col_index
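A direct check of the mapping, using made-up group labels; iteration order of the keys comes from `set()` and is not guaranteed, but the dict comparison below is order-insensitive:

```python
def _create_group_to_col_position(column_groups):
    if column_groups is not None:
        group_to_col_index = {group: [] for group in list(set(column_groups))}
        for i, group in enumerate(column_groups):
            group_to_col_index[group].append(i)
    else:
        group_to_col_index = None
    return group_to_col_index

positions = _create_group_to_col_position(["a", "a", "b", "c", "c"])
assert positions == {"a": [0, 1], "b": [2], "c": [3, 4]}
assert _create_group_to_col_position(None) is None
```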
[ "def _get_column_mapping(cls) -> Dict[str, str]:\n pass", "def header_col_number_mapping(worksheet_lol):\n header_col_number_dict = dict()\n for col in range(len(worksheet_lol)):\n header_col_number_dict[worksheet_lol[col][0]] = col\n return header_col_number_dict", "def get_group_positions(u, indi):\n positions = []\n for i in indi.correction_groups:\n selstr = 'bynum %d' % i[0]\n for j in i[1:]:\n selstr += ' or bynum %d' % j\n positions.append(u.select_atoms(selstr).positions)\n return positions", "def _genposmap(self):\n mc = self._pos.mc\n\n rngmap = np.zeros(mc.shape)\n for rngind, rng in enumerate(self._ranges):\n rng = self._ranges[rngind,:]\n # rngarray: 1 where mc matches current range, 0 where not\n rngarray = ((mc > rng[0]) & (mc < rng[1])).astype(int)\n rngarray *= (rngind + 1) # add one to differentiate between 0 indeces and\n # unranged points\n rngmap += rngarray\n\n self._posmap = rngmap", "def fixed_width_columns(self):\n return tuple(zip(self.columns, self.offsets))", "def aligned_cols(self):\n return {t.col for t in self.stream if t.aligned}", "def indices_groups(self):\n if self._indices_groups is None:\n indices = []\n for idx, parameter in enumerate(self.parameters_ordered):\n if isinstance(parameter, ParameterGroup):\n for j in range(len(parameter.key)):\n indices.append(idx)\n else:\n indices.append(idx)\n\n self._indices_groups = np.array(indices, dtype=np.int64)\n # self._indices_groups = tuple(indices)\n\n return self._indices_groups", "def _assign_symbols_from_groups(cmap_ops):\n\n cmap_ops.phase('assign symbols from groups')\n with open('codepoint_groups.txt', 'r') as f:\n for lineix, line in enumerate(f):\n ix = line.find('#')\n if ix >= 0:\n line = line[:ix]\n line = line.strip()\n if not line:\n continue\n\n cols = [s.strip() for s in line.split(';')]\n if not len(cols) == 3:\n print ('incorrect cols on line %d \"%s\"' % (lineix, line))\n if cols[0] == '':\n # no assignments for this line\n continue\n\n add, remove = [], []\n for s in cols[0].split():\n if s.startswith('-'):\n remove.append(s[1:])\n else:\n add.append(s)\n name = cols[1]\n\n # We use parens to delimit parts of the ranges that are 'for\n # reference' but should not impact codepoint assignment.\n # since parse_int_ranges doesn't understand these, strip\n # out the parenthesized sections. 
These don't nest but we\n # don't check for this, only that open ranges are closed.\n ranges = cols[2]\n parts = None\n ix = 0\n while ix < len(ranges):\n open_p = ranges.find('(', ix)\n if open_p < 0:\n if parts is not None:\n parts.append(ranges[ix:].strip())\n break\n close_p = ranges.find(')', open_p+1)\n if close_p < 0:\n raise Exception(\n 'unclosed paren in ranges on line %d \"%s\"' % (lineix, line))\n if parts is None:\n parts = []\n parts.append(ranges[ix:open_p])\n ix = close_p + 1\n if parts:\n ranges = ' '.join(parts)\n\n try:\n cps = tool_utils.parse_int_ranges(ranges)\n except Exception as err:\n sys.stderr.write(err + '\\n')\n sys.stderr.write(cols[2] + '\\n')\n sys.stderr.write('problem on %d \"%s\"\\n' % (lineix, line))\n raise\n if len(cps) > 50:\n sys.stderr.write('large range (%d) on %d \"%s\"\\n' % (\n len(cps), lineix, line))\n\n cmap_ops.log('group: %s (%d)' % (name, len(cps)))\n if add:\n cmap_ops.add_all_to_all(cps, add)\n if remove:\n cmap_ops.remove_all_from_all(cps, remove)", "def facet_cell_map(self):\n return op2.Map(self.set, self.bottom_set, self._rank, self.facet_cell,\n \"facet_to_cell_map\")", "def _minimap_to_grid(self, pos_name):\n for k in range(len(self.minimap)):\n for l in range(len(self.minimap[k])):\n if pos_name == self.minimap[k][l]:\n cordx = l*41\n cordy = k*41\n return cordx, cordy", "def getColumns(self):\n\t\treturn tuple(\n\t\t\t\ttuple(self.rows[rowInd][colInd] for rowInd in self.indices)\n\t\t\tfor colInd in self.indices)", "def getPositionsDict(self):\n return {ID: self.elements[ID].getPosition() for ID in self.elements}", "def get_positions_map(self) -> Dict[str, int]:\n positionMap: Dict[str, int] = {}\n for item in self.get_open_positions():\n key = item.epic + \"-\" + item.direction.name\n if key in positionMap:\n positionMap[key] = item.size + positionMap[key]\n else:\n positionMap[key] = item.size\n return positionMap", "def _column_idx(self):\n column_element_ids = self._columns_dimension.element_ids\n sort_column_id = self._order_spec.element_id\n # --- Need to translate the element id to the shimmed element id\n sort_column_id = self._columns_dimension.translate_element_id(sort_column_id)\n return column_element_ids.index(sort_column_id)", "def get_group_indices(groups_list,group_number):\r\n file = open(groups_list, 'r')\r\n lines=file.readlines()\r\n group_indices = np.fromstring(lines[group_number],sep=\"\\t\", dtype = int)\r\n\r\n return group_indices", "def _get_column_values(table, col_index):\n index_to_values = {}\n for row_index, row in table.iterrows():\n text = normalize_for_match(row[col_index].text)\n index_to_values[row_index] = list(_get_numeric_values(text))\n return index_to_values", "def cell_mapping(self,cell) :\n\n j = np.floor(cell/self.param.n_x)\n i = cell - j*self.param.n_x\n\n return i,j", "def _column_idx(self):\n column_element_ids = self._columns_dimension.element_ids\n sort_column_id = self._order_spec.insertion_id\n # --- Need to translate the element id to the shimmed element id\n sort_column_id = self._columns_dimension.translate_element_id(sort_column_id)\n return column_element_ids.index(sort_column_id)", "def _columns_for_cells(self, cells):\n if np.any(cells < self.local_range[0]) or np.any(cells >= self.local_range[1]):\n raise ValueError('cells are not in bounds')\n\n local_cells = cells - self.local_range[0]\n columns = local_cells // self.cells_per_column\n return columns.astype('int32')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge value and inference series. Return a string series with parameter values and precision values below the respective parameter values.
def _combine_series(value_sr, inference_sr):
    value_df = value_sr.to_frame(name="")
    original_cols = value_df.columns
    value_df.reset_index(drop=False, inplace=True)
    index_names = [item for item in value_df.columns if item not in original_cols]
    # set the index to even numbers, starting at 0
    value_df.index = value_df.index * 2
    inference_df = inference_sr.to_frame(name="")
    inference_df.reset_index(drop=False, inplace=True)
    # set the index to odd numbers, starting at 1
    inference_df.index = (inference_df.index * 2) + 1
    inference_df[index_names[-1]] = ""
    df = pd.concat([value_df, inference_df]).sort_index()
    df.set_index(index_names, inplace=True, drop=True)
    return df[""]
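A runnable sketch of the even/odd interleaving trick, assuming pandas; the parameter names and string values are invented:

```python
import pandas as pd

def _combine_series(value_sr, inference_sr):
    value_df = value_sr.to_frame(name="")
    original_cols = value_df.columns
    value_df.reset_index(drop=False, inplace=True)
    index_names = [item for item in value_df.columns if item not in original_cols]
    value_df.index = value_df.index * 2                # even slots: values
    inference_df = inference_sr.to_frame(name="")
    inference_df.reset_index(drop=False, inplace=True)
    inference_df.index = (inference_df.index * 2) + 1  # odd slots: inference
    inference_df[index_names[-1]] = ""                 # blank label on inference rows
    df = pd.concat([value_df, inference_df]).sort_index()
    df.set_index(index_names, inplace=True, drop=True)
    return df[""]

values = pd.Series(["0.52", "1.20"], index=pd.Index(["beta", "gamma"], name="param"))
errors = pd.Series(["(0.10)", "(0.33)"], index=pd.Index(["beta", "gamma"], name="param"))
print(_combine_series(values, errors))
# each parameter value is followed by its inference value under a blank index label
```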
[ "def round(self, decimals: int) -> Series:", "def get_integrated_benchmarking_fields_series_for_setFilters_df(df):\n\n # get a df where each row is one df\n df_best_filters = df.groupby(\"svtype\").apply(get_best_less_conservative_row_df_benchmark)\n\n # debug when there are filters_dict\n if \"filters_dict\" in set(df_best_filters.keys()):\n\n if len(set(df_best_filters[\"filters_dict\"].apply(get_dict_as_tuple)))!=1: \n pass\n #raise ValueError(\"There are more than 1 filtersDict\")\n\n # initialize a dict that will contain all the integrated filters\n integrated_benchmarking_results_dict = {}\n\n # get the numeric vals\n for f in [\"FN\", \"FP\", \"TP\", \"nevents\"]: integrated_benchmarking_results_dict[f] = sum(df_best_filters[f])\n\n # get through the event IDs \n for f in ['TP_predictedIDs', 'false_negatives_knownIDs', 'false_positives_predictedIDs', 'true_positives_knownIDs', 'true_positives_predictedIDs']: integrated_benchmarking_results_dict[f] = \"||\".join(df_best_filters[f].apply(str))\n\n # add the calculation of accuracy statistics\n TP = integrated_benchmarking_results_dict[\"TP\"]\n FP = integrated_benchmarking_results_dict[\"FP\"]\n FN = integrated_benchmarking_results_dict[\"FN\"]\n nevents = integrated_benchmarking_results_dict[\"nevents\"]\n\n if nevents==0: precision=1.0; recall=1.0\n else:\n if TP==0 and FP==0: precision = 0.0\n else: precision = TP/(TP + FP)\n recall = TP/(TP + FN)\n \n if precision<=0.0 or recall<=0.0: Fvalue = 0.0\n else: Fvalue = (2*precision*recall)/(precision+recall)\n\n integrated_benchmarking_results_dict[\"precision\"] = precision\n integrated_benchmarking_results_dict[\"recall\"] = recall\n integrated_benchmarking_results_dict[\"Fvalue\"] = Fvalue\n\n # add other fields\n integrated_benchmarking_results_dict[\"svtype\"] = \"integrated\"\n\n # add the fileds corresponding to when there are filters dicts\n if \"filters_dict\" in set(df_best_filters.keys()): \n\n integrated_benchmarking_results_dict[\"filters_dict\"] = get_represenative_filtersDict_for_filtersDict_list(list(df_best_filters[\"filters_dict\"]), type_filters=\"less_conservative\")\n integrated_benchmarking_results_dict[\"clove_max_rel_coverage_to_consider_del\"] = df_best_filters.loc[\"deletions\", \"clove_max_rel_coverage_to_consider_del\"]\n integrated_benchmarking_results_dict[\"clove_min_rel_coverage_to_consider_dup\"] = df_best_filters.loc[\"tandemDuplications\", \"clove_min_rel_coverage_to_consider_dup\"]\n\n integrated_benchmarking_results_dict[\"median_insert_size\"] = df_best_filters.loc[\"deletions\", \"median_insert_size\"]\n integrated_benchmarking_results_dict[\"median_insert_size_sd\"] = df_best_filters.loc[\"deletions\", \"median_insert_size_sd\"]\n integrated_benchmarking_results_dict[\"sorted_bam\"] = df_best_filters.loc[\"deletions\", \"sorted_bam\"]\n integrated_benchmarking_results_dict[\"median_coverage\"] = df_best_filters.loc[\"deletions\", \"median_coverage\"]\n\n return pd.Series(integrated_benchmarking_results_dict)", "def FeatureValueFormat(singlegene):\n\n ## based on the feature set including for rQuant process each genes selected feature values. 
\n import numpy as np\n comp_exon = np.zeros((len(singlegene['exons']),), dtype=np.object)\n for i in range(len(singlegene['exons'])):\n comp_exon[i]= np.array(singlegene['exons'][i])\n singlegene['exons'] = comp_exon\n comp_transcripts = np.zeros((len(singlegene['transcripts']),), dtype=np.object)\n for i in range(len(singlegene['transcripts'])):\n comp_transcripts[i] = np.array(singlegene['transcripts'][i])\n singlegene['transcripts'] = comp_transcripts\n return singlegene", "def get_text(self):\n # FIXME Finish comments\n if self.value is None:\n result_str = str(self.value)\n else:\n format_str = '%%.%dg' % self.precision\n result_str = '['\n result_str += ', '.join([format_str % f for f in self.value])\n result_str += ']'\n return result_str", "def results(self):\r\n return pd.Series(\r\n {\r\n \"metric_bo\": getattr(self, \"metric_bo\", None),\r\n \"time_bo\": getattr(self, \"time_bo\", None),\r\n \"metric_train\": getattr(self, \"metric_train\", None),\r\n \"metric_test\": getattr(self, \"metric_test\", None),\r\n \"time_fit\": getattr(self, \"time_fit\", None),\r\n \"mean_bagging\": getattr(self, \"mean_bagging\", None),\r\n \"std_bagging\": getattr(self, \"std_bagging\", None),\r\n \"time_bagging\": getattr(self, \"time_bagging\", None),\r\n \"time\": getattr(self, \"time\", None),\r\n },\r\n name=self.name,\r\n )", "def _reindex_and_float_format_params(\n models, show_inference, confidence_intervals, number_format, add_trailing_zeros\n):\n dfs = _get_params_frames_with_common_index(models)\n cols_to_format = _get_cols_to_format(show_inference, confidence_intervals)\n formatted_frames, max_trail = _apply_number_formatting_frames(\n dfs, cols_to_format, number_format, add_trailing_zeros\n )\n return formatted_frames, max_trail", "def series():\n pass", "def net_parameters_to_dataframe(self, stringify_index=False):\n interactions, values = self.free_parameters, self.parameters.get_value()\n # now put everything in dataframe\n return pd.DataFrame({\n 'interaction': interactions,\n 'value': values\n }).set_index('interaction')", "def Statsmodels_Params(name, results, Explanatory, NumDecimal):\n if name == \"Holt Winter’s Exponential Smoothing\":\n ResultsParams = results.params\n # ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n # for item in range(0, len(Explanatory.columns)):\n # ResultsParams[item+1] = str(ResultsParams[item+1]) + ' ' + str(Explanatory.columns[item])\n\n # ResultsParams[0] = str(ResultsParams[0])\n # ResultsParams = ', '.join(ResultsParams)\n elif \"AR\" in name:\n ResultsParams = results.params\n ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n for item in range(0, len(Explanatory.columns)):\n ResultsParams[item + 1] = str(ResultsParams[item + 1]) + ' ' + str(Explanatory.columns[item])\n\n ResultsParams[0] = str(ResultsParams[0])\n # ResultsParams = ', '.join(ResultsParams)\n\n else:\n ResultsParams = results.params\n ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n for item in range(0, len(Explanatory.columns)):\n\n ResultsParams[item + 1] = str(ResultsParams[item + 1]) + ' ' + str(Explanatory.columns[item])\n\n ResultsParams[0] = str(ResultsParams[0])\n ResultsParams = ', '.join(ResultsParams)\n\n return ResultsParams", "def extend_constant(self, value: int | float | str | bool | None, n: int) -> Series:", "def format_p_value_for_num_iters(p,num_iters):\n if num_iters < 10:\n # this can be the last step of a long process, so we don't \n # want to fail\n return \"Too few iters to compute 
p-value (num_iters=%d)\" % num_iters\n decimal_places = int(log10(num_iters))\n result = ('%1.'+'%df' % decimal_places) % p\n return result", "def combination(first: pandas.Series, second: pandas.Series):\n return first.astype(str) + sep + second.astype(str)", "def pandasSeriesSerializer(series):\n ts = int(series.name.timestamp() * 1000)\n dataToSend = dict(ts=ts, values=series.to_dict())\n message = json.dumps(dataToSend).encode('utf-8')\n return message", "def add_to_plotvars(value, fdict, arg, res):\n if value == \"\":\n return\n if arg[\"type\"] == \"vtec_ps\":\n suffix = arg[\"name\"]\n value = html_escape(fdict.get(f\"phenomena{suffix}\", \"SV\"))\n res[\"pltvars\"].append(f\"phenomena{suffix}:{value}\")\n value = html_escape(fdict.get(f\"significance{suffix}\", \"W\"))\n res[\"pltvars\"].append(f\"significance{suffix}:{value}\")\n return\n if arg[\"type\"] == \"cmap\":\n return\n if isinstance(value, (str, int, float)):\n res[\"pltvars\"].append(f\"{arg['name']}:{value}\")\n elif isinstance(value, date):\n res[\"pltvars\"].append(f\"{arg['name']}:{value.strftime('%Y-%m-%d')}\")\n elif isinstance(value, datetime):\n res[\"pltvars\"].append(\n f\"{arg['name']}:{value.strftime('%Y-%m-%d %H%M')}\"\n )\n else:\n for val in value:\n res[\"pltvars\"].append(f\"{arg['name']}:{val}\")", "def _fmt_value(x):\n if precision is not None and isinstance(x, Number):\n return str(round(x, precision))\n else:\n return str(x)", "def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_dataframe_of_float_int_category_steps_1():\n series = pd.DataFrame({'l1': pd.Series(np.arange(10), dtype=float), \n 'l2': pd.Series(np.arange(50, 60), dtype=float)})\n exog = pd.DataFrame({'exog_1': pd.Series(np.arange(100, 110), dtype=float),\n 'exog_2': pd.Series(np.arange(1000, 1010), dtype=int),\n 'exog_3': pd.Categorical(range(100, 110))})\n \n forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l1',\n lags=5, steps=1)\n results = forecaster.create_train_X_y(series=series, exog=exog) \n expected = (\n pd.DataFrame(\n data = np.array([[4., 3., 2., 1., 0., 54., 53., 52., 51., 50., 105., 1005.],\n [5., 4., 3., 2., 1., 55., 54., 53., 52., 51., 106., 1006.],\n [6., 5., 4., 3., 2., 56., 55., 54., 53., 52., 107., 1007.],\n [7., 6., 5., 4., 3., 57., 56., 55., 54., 53., 108., 1008.],\n [8., 7., 6., 5., 4., 58., 57., 56., 55., 54., 109., 1009.]], \n dtype=float),\n index = pd.RangeIndex(start=5, stop=10, step=1),\n columns = ['l1_lag_1', 'l1_lag_2', 'l1_lag_3', 'l1_lag_4', 'l1_lag_5', \n 'l2_lag_1', 'l2_lag_2', 'l2_lag_3', 'l2_lag_4', 'l2_lag_5',\n 'exog_1_step_1', 'exog_2_step_1']\n ).astype({'exog_1_step_1': float, \n 'exog_2_step_1': int}).assign(exog_3_step_1=pd.Categorical(range(105, 110), categories=range(100, 110))\n ),\n {1: pd.Series(\n data = np.array([5., 6., 7., 8., 9.], dtype=float), \n index = pd.RangeIndex(start=5, stop=10, step=1),\n name = \"l1_step_1\"\n )\n }\n )\n\n pd.testing.assert_frame_equal(results[0], expected[0])\n assert isinstance(results[1], dict)\n assert all(isinstance(x, pd.Series) for x in results[1].values())\n assert results[1].keys() == expected[1].keys()\n for key in expected[1]: \n pd.testing.assert_series_equal(results[1][key], expected[1][key])", "def test_predict_output_when_regressor_is_LinearRegression_with_transform_series_as_dict():\n forecaster = ForecasterAutoregMultiSeries(\n regressor = LinearRegression(),\n lags = 5,\n transformer_series = {'1': StandardScaler(), '2': MinMaxScaler()}\n )\n forecaster.fit(series=series)\n predictions = 
forecaster.predict(steps=5, levels=['1'])\n\n expected = pd.DataFrame(\n data = np.array([0.59619193, 0.46282914, 0.41738496, 0.48522676, 0.47525733]),\n index = pd.RangeIndex(start=50, stop=55, step=1),\n columns = ['1']\n )\n \n pd.testing.assert_frame_equal(predictions, expected)", "def modify_pr(data):\n\n data = data.loc[~pd.isnull(data[PRECISION_COLUMN]), :]\n precision = np.insert(data[PRECISION_COLUMN].values, 0, data[PRECISION_COLUMN].iloc[0])\n recall = np.insert(data[RECALL_COLUMN].values, 0, 0)\n return recall, precision", "def to_pandoc_table(self, experiment, **kwargs):\n t = []\n\n\n t.append('| Simulation Parameter | Value or Values |\\n')\n t.append('|:---------------------------------------|:--------------------------------------------------|\\n')\n\n for var in self._get_public_variables():\n s = '| '\n s += self.parameter_labels[var[0]]\n s += ' | '\n\n\n # need to know if var[1] is a single integer, or a list\n if hasattr(var[1], '__iter__'):\n s += ', '.join(map(str, var[1]))\n else:\n s += str(var[1])\n\n s += ' | \\n'\n t.append(s)\n\n\n\n\n return ''.join(t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
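The even/odd reindexing in `_combine_series` above is easiest to see on a toy input. A minimal sketch, assuming only pandas; the parameter names and the parenthesized inference strings are invented for illustration:

import pandas as pd

# Hypothetical point estimates and the strings to print beneath them.
value_sr = pd.Series([0.5, -1.2], index=["beta", "gamma"])
inference_sr = pd.Series(["(0.10)", "(0.30)"], index=["beta", "gamma"])

value_df = value_sr.to_frame(name="").reset_index()
value_df.index = value_df.index * 2      # even slots: 0, 2, ...
inf_df = inference_sr.to_frame(name="").reset_index()
inf_df.index = inf_df.index * 2 + 1      # odd slots: 1, 3, ...
inf_df["index"] = ""                     # blank label so the row hangs under its estimate

combined = pd.concat([value_df, inf_df]).sort_index().set_index("index")
print(combined[""])  # beta, (0.10), gamma, (0.30), interleaved top to bottom

Sorting the combined integer index is what interleaves the two series; the function above applies the same mechanism to arbitrary (multi-)indexes.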
Generate the LaTeX script of the notes section.
def _generate_notes_latex(
    append_notes, notes_label, significance_levels, custom_notes, df
):
    n_levels = df.index.nlevels
    n_columns = len(df.columns)
    significance_levels = sorted(significance_levels)
    notes_text = ""
    if append_notes:
        notes_text += "\\midrule\n"
        notes_text += "\\textit{{{}}} & \\multicolumn{{{}}}{{r}}{{".format(
            notes_label, str(n_columns + n_levels - 1)
        )
        # iterate over the penultimate significance levels since the last item
        # of the legend is not followed by a semicolon
        for i in range(len(significance_levels) - 1):
            star = "*" * (len(significance_levels) - i)
            notes_text += f"$^{{{star}}}$p$<${significance_levels[i]};"
        notes_text += "$^{*}$p$<$" + str(significance_levels[-1]) + "} \\\\\n"
    if custom_notes:
        amp_n = "&" * n_levels
        if isinstance(custom_notes, list):
            if not all(isinstance(n, str) for n in custom_notes):
                raise ValueError(
                    f"""Each custom note can only be of string type.
                    The following notes:
                    {[n for n in custom_notes if type(n) != str]} are of types
                    {[type(n) for n in custom_notes if type(n) != str]}
                    respectively."""
                )
            for n in custom_notes:
                notes_text += """
                {}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n""".format(
                    amp_n, n_columns, n
                )
        elif isinstance(custom_notes, str):
            notes_text += "{}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n".format(
                amp_n, n_columns, custom_notes
            )
        else:
            raise TypeError(
                f"""Custom notes can be either a string or a list of strings.
                Not: {type(custom_notes)}."""
            )
    return notes_text
[ "def generate_note(stem: str) -> str:\n note = f\"\"\"\n.. note::\n An *xml* file containing the defaults for the `{stem}` calculator can be created via `-p {stem} -o FILENAME` command line options `\n\"\"\"\n return note", "def gen_readme():\n\n doc = '''\n=== README for Tornastrap ===\n\nApplications are self contained as packages and are added to the\n SETTINGS which would take care of adding apps to the path at runtime.\n\n'''\n\n return doc", "def gen_release_notes() -> str:\n # this is non-portable, so replace if using script with a diff repo\n notes_text = [\"#### Docker Image\\n\\n\", f\"Docker image available: {docker_root}:{args.tag}\\n\\n\"]\n\n stdout, _ = run_cmd([\"git\", \"show\", f\"{args.tag}:docs/releasenotes/README.md\"])\n skip_lines = True\n for line in StringIO(stdout):\n if line.startswith(\"### Highlights\") and skip_lines:\n skip_lines = False\n elif line.startswith(\"## Version\") and skip_lines is False:\n # only include most recent notes\n break\n\n if skip_lines is False:\n notes_text.append(\n line.replace(\n \"./img/\",\n f\"https://gitlab.com/alleles/ella/raw/{args.tag}/docs/releasenotes/img/\",\n )\n )\n return \"\".join(notes_text)", "def about(self):\n\n self.show_notebook_if_not_shown()\n\n a1 = ' ________________________________________________'\n a2 = ' _ _ '\n a3 = ' /| / '\n a4 = ' --_/_---_--_------__---/-| -/-----__--_/_----__-'\n a5 = ' / / / ) / ) / | / / ) / /___)'\n a6 = ' _(_ __/_/__/___/___/_/___|/____(___/_(_ __(___ _'\n a7 = ' / '\n a8 = ' tmpNote / Another text editor.'\n\n asciiart = '{0}\\n{1}\\n{2}\\n{3}\\n{4}\\n{5}\\n{6}\\n{7}\\n\\n\\n\\n'.format(a1,a2,a3,a4,a5,a6,a7,a8)\n info = ' Version {0}\\n License {1}\\n {2}'.format(__version__, __license__, __copyright__)\n text = '{0}{1}'.format(asciiart, info)\n\n readonly = True\n page = TxtCtrl(self, text, readonly)\n self.pages.append(page)\n\n page.SetUndoCollection(False)\n page.SetBufferedDraw(True)\n page.SetWrapMode(stc.STC_WRAP_NONE)\n\n page.python_syntax = False\n page.folding_symbols = False\n page.line_numbers = False\n page.word_wrap = False\n page.path = ''\n page.filename = 'About tmpNote'\n page.datetime = str(datetime.datetime.now())\n\n # http://www.scintilla.org/ScintillaDoc.html#Margins\n page.SetMarginLeft(6) # Text area left margin.\n page.SetMarginWidth(0, 0) # Line numbers margin.\n page.SetMarginWidth(1, 0) # Non-folding symbols margin.\n page.SetMarginWidth(2, 0) # Folding symbols margin.\n\n self.notebook.AddPage(\n page = page,\n text = 'About tmpNote',\n select = True\n )\n\n self.set_styles_default()\n page.SetFocus()\n page.SetSavePoint()", "def genpage_tool(tool_note):\n return ('**{}**\\n\\n'\n 'Command: *{}*\\n\\n'\n 'Version: {}\\n\\n'\n '{}\\n').format(\n tool_note.name,\n tool_note.cmd,\n tool_note.ver,\n render_links(tool_note.desc))", "def single_notes(help=\"Generate a dataset of single notes in ./single_notes\"):\n programs = [Instrument.LEAD_1_SQUARE, Instrument.LEAD_2_SAWTOOTH, Instrument.RECORDER]\n for program in programs:\n os.makedirs(f'./single_notes/{program}')\n for note in range(40, 100):\n midi, track = single_track_midi(program=0)\n track.append(mido.Message('note_on', note=note, velocity=80, time=0))\n track.append(mido.Message('note_off', note=note, velocity=127,\n time=Dur.quarter))\n for program in [Instrument.LEAD_1_SQUARE, Instrument.LEAD_2_SAWTOOTH,\n Instrument.RECORDER]:\n change_program(track, program) \n midi.save(temp_mid)\n fluidsynth_render(temp_mid, f'./single_notes/{program}/{program}_{note}.wav')", "def manual():\n 
return render_template(\"manual.html\")", "def build_incremental(note_filename):\n note = parse(os.path.join(NOTES_DIR, note_filename))\n write(os.path.join(DOCS_DIR, note[\"id\"] + \".html\"), note)\n build_index()", "def generate_titlepage(notes):\n # header\n text = '{} total actions have been recorded in this notebook.\\n\\n'.format(\n len(notes))\n # list of links to each action\n for i, n in enumerate(notes):\n text += '{}. [{}]({})\\n'.format(\n i+1,\n n.desc.text.split('\\n')[0],\n '{}/{}.md'.format(TEMP_DIR, n.uid))\n return text", "def get_notes(self):\n\t\tself.notes = input(\"Notes (Press enter if None): \")", "def genpage_data(data_note):\n return ('**{}**\\n\\n'\n '*{}*\\n\\n'\n 'Source: *{}*\\n\\n'\n '{}\\n').format(\n data_note.name,\n data_note.path,\n render_links(data_note.src),\n render_links(data_note.desc))", "def generate_latex(filename, eqns=[]):\r\n doc = Document()\r\n\r\n with doc.create(Section('Automatic Conjectures')):\r\n doc.append('These are the conjectures detected by the algorithm.')\r\n\r\n for eqn in eqns:\r\n with doc.create(Alignat(numbering=False, escape=False)) as agn:\r\n agn.append(eqn)\r\n\r\n doc.generate_pdf(filename, clean_tex=False)", "def show_notes(self):\n\t\tprint(\"You have the following to-do notes added: \\n\")\n\t\tfor n, note in enumerate(glob.glob(self.dir_address + '\\\\*.txt')):\n\t\t\ttitle = note.split('\\\\')\n\t\t\ttitle_name = title[-1].strip(\".txt\")\n\t\t\tprint(f\"{n+1}. {title_name}\")", "def script_description():\n\n desc = \"<!doctype html>\\n\\n<html lang=\\\"en\\\">\\n<body>\\n\"\n filename = next((x for x in [\"{}.md\".format(\n __name__), \"README.md\"] if x in os.listdir(script_path())), None)\n if filename:\n with open(os.path.join(script_path(), filename), \"r\", encoding=\"utf-8\") as f:\n try:\n desc += markdown.markdown(f.read(), extensions=[\"tables\"])\n except Exception as e:\n print(e)\n f.close()\n desc += \"\\n<h2>Script Information</h2>\\n<p>\\n<table width=\\\"90%\\\">\\n<tbody>\\n\"\n for x in [\"__version__\", \"__author__\"]:\n desc += \"<tr>\\n<td>{}:</td>\\n<td>{}</td>\\n</tr>\\n\".format(\n x.replace(\"__\", \"\").title(), eval(x))\n desc += \"<tr>\\n<td>{0}:</td>\\n<td><a href=\\\"{1}\\\">{1}</a></td>\\n</tr>\\n\".format(\n \"Website\", __website__)\n desc += \"</tbody>\\n</table>\\n</p>\\n</body>\\n</html>\\n\"\n return desc", "def note_view():\n google_notes = keep.all()\n\n os.system('clear')\n print(consts.YELLOW_BOLD)\n print(fig.renderText('Keep...'))\n\n if len(google_notes) == 0:\n print(consts.RED, end='')\n print('You don\\'t have any notes!'.center(consts.WIDTH))\n # choices = [\n # '✎ Make a New Note ✎',\n # '✎ Make a New List ✎',\n # '⛔ Exit ⛔'\n # ]\n choices = [\n consts.MAKE_NOTE,\n consts.MAKE_LIST,\n consts.EXIT\n ]\n note_list = []\n else:\n global continue_printing_row\n\n note_list = NoteGrid.listify_google_notes(google_notes)\n note_list = NoteGrid.wrap_text(note_list)\n note_list = NoteGrid.add_list_border(note_list)\n NoteGrid.print_grid(note_list, continue_printing_row)\n print('\\n')\n continue_printing_row = True\n # choices = [\n # '✎ Make a New Note ✎',\n # '✎ Make a New List ✎',\n # 'Edit a Note',\n # '⛔ Exit ⛔']\n choices = [\n consts.MAKE_NOTE,\n consts.MAKE_LIST,\n consts.EDIT_NOTE,\n consts.EXIT\n ]\n\n initial_prompt = [\n {\n 'type': 'list',\n 'name': 'options',\n 'message': consts.SELECT_OPTION,\n 'choices': choices\n }]\n initial_selection = prompt(initial_prompt)\n\n if initial_selection.get('options') == consts.MAKE_NOTE:\n make_a_note(note_list)\n elif 
initial_selection.get('options') == consts.MAKE_LIST:\n make_a_list(note_list)\n elif initial_selection.get('options') == consts.EDIT_NOTE:\n edit_note_selector_view(note_list, google_notes)\n elif initial_selection.get('options') == consts.EXIT:\n raise SystemExit", "def generate_readme(self):\n print(\"## Examples of settings runtime params\")\n print(\"### Command-line parameters\")\n print(\"```\")\n self.generate_command()\n print(\"```\")\n print(\"### Environment variables\")\n print(\"```\")\n self.generate_env()\n print(\"```\")\n print(\"### ini file\")\n print(\"```\")\n self.generate_ini()\n print(\"```\")\n print(\"### docker run\")\n print(\"```\")\n self.generate_docker_run()\n print(\"```\")\n print(\"### docker compose\")\n print(\"```\")\n self.generate_docker_compose()\n print(\"```\")\n print(\"### kubernetes\")\n print(\"```\")\n self.generate_kubernetes()\n print(\"```\")\n print(\"### drone plugin\")\n print(\"```\")\n self.generate_drone_plugin()\n print(\"```\")", "def generate_body_latex(self):\n body = ''\n for cov_name in self.cov_names:\n body += self.generate_cov_rows_latex(cov_name)\n body += ' '\n for _ in range(self.num_models):\n body += '& '\n body += '\\\\\\\\\\n'\n\n return body", "def create_note(citekey, config, bbt, force, template):\n candidates = bbt.search_citekey_in_bbp(citekey)\n if not candidates:\n click.echo(\"No results found for \" + citekey)\n sys.exit()\n elif len(candidates) != 1:\n click.echo(\"Something wrong happened here. We have too many candidates...\")\n sys.exit()\n else:\n candidate = candidates[0]\n fieldValues = bbt.extract_fields(candidate)\n\n # Fill template\n try:\n note = Note(citekey, fieldValues, config, template)\n except BadTemplateName as e:\n click.echo(e)\n sys.exit()\n\n # Write output file\n notes_dir = Path(config[\"notes\"])\n outfile = notes_dir / f\"{citekey}.md\"\n\n if outfile.exists():\n if force:\n click.echo(f\"Overwriting {str(outfile)}\")\n else:\n choice = click.confirm(\n \"This file already exists. Edit instead?\"\n \"Use --force to overwrite files.\"\n )\n if choice:\n os.system(f\"{config['editor']} {str(outfile)}\")\n else:\n click.echo(f\"Writing {str(outfile)}\")\n\n # Write note\n outfile.write_text(note.render())", "def set_notes_templatefile(self):\n\n self.par_notestemplate = filedialog.askopenfilename()\n self.entry_notestemplate.delete(0, END)\n self.entry_notestemplate.insert(0, self.par_notestemplate)\n LOGGER.debug('README template: %s', self.par_notestemplate)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
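To make the string logic above concrete, a small hedged usage sketch; the two-column frame is invented, and the commented output is what the default significance levels should yield:

import pandas as pd

df = pd.DataFrame(
    {"(1)": [0.5, 0.1], "(2)": [0.4, 0.2]},
    index=pd.Index(["beta", "gamma"], name="parameter"),
)
notes = _generate_notes_latex(
    append_notes=True,
    notes_label="Note:",
    significance_levels=(0.1, 0.05, 0.01),
    custom_notes="Robust standard errors.",
    df=df,
)
print(notes)
# \midrule
# \textit{Note:} & \multicolumn{2}{r}{$^{***}$p$<$0.01;$^{**}$p$<$0.05;$^{*}$p$<$0.1} \\
# &\multicolumn{2}{r}\textit{Robust standard errors.}\\

Note that `n_columns + n_levels - 1` makes the legend span every column except the index column.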
Generate the HTML script of the notes section of the estimation table.
def _generate_notes_html(
    append_notes, notes_label, significance_levels, custom_notes, df
):
    n_levels = df.index.nlevels
    n_columns = len(df.columns)
    significance_levels = sorted(significance_levels)
    notes_text = """<tr><td colspan="{}" style="border-bottom: 1px solid black">
        </td></tr>""".format(
        n_columns + n_levels
    )
    if append_notes:
        notes_text += """
        <tr><td style="text-align: left">{}</td><td colspan="{}" style="text-align: right">""".format(
            notes_label, n_columns + n_levels - 1
        )
        for i in range(len(significance_levels) - 1):
            stars = "*" * (len(significance_levels) - i)
            notes_text += f"<sup>{stars}</sup>p&lt;{significance_levels[i]}; "
        notes_text += f"""<sup>*</sup>p&lt;{significance_levels[-1]} </td>"""
    if custom_notes:
        if isinstance(custom_notes, list):
            if not all(isinstance(n, str) for n in custom_notes):
                raise ValueError(
                    f"""Each custom note can only be of string type.
                    The following notes:
                    {[n for n in custom_notes if type(n) != str]} are of types
                    {[type(n) for n in custom_notes if type(n) != str]}
                    respectively."""
                )
            notes_text += """
                <tr><td></td><td colspan="{}" style="text-align: right">{}</td></tr>
                """.format(
                n_columns + n_levels - 1, custom_notes[0]
            )
            if len(custom_notes) > 1:
                for i in range(1, len(custom_notes)):
                    notes_text += """
                        <tr><td></td><td colspan="{}" style="text-align: right">
                        {}</td></tr>
                        """.format(
                        n_columns + n_levels - 1, custom_notes[i]
                    )
        elif isinstance(custom_notes, str):
            notes_text += """
                <tr><td></td><td colspan="{}" style="text-align: right">{}</td></tr>
                """.format(
                n_columns + n_levels - 1, custom_notes
            )
        else:
            raise TypeError(
                f"""Custom notes can be either a string or a list of strings,
                not {type(custom_notes)}."""
            )
    return notes_text
[ "def _generate_notes_latex(\n append_notes, notes_label, significance_levels, custom_notes, df\n):\n n_levels = df.index.nlevels\n n_columns = len(df.columns)\n significance_levels = sorted(significance_levels)\n notes_text = \"\"\n if append_notes:\n notes_text += \"\\\\midrule\\n\"\n notes_text += \"\\\\textit{{{}}} & \\\\multicolumn{{{}}}{{r}}{{\".format(\n notes_label, str(n_columns + n_levels - 1)\n )\n # iterate over penultimate significance_lelvels since last item of legend\n # is not followed by a semi column\n for i in range(len(significance_levels) - 1):\n star = \"*\" * (len(significance_levels) - i)\n notes_text += f\"$^{{{star}}}$p$<${significance_levels[i]};\"\n notes_text += \"$^{*}$p$<$\" + str(significance_levels[-1]) + \"} \\\\\\\\\\n\"\n if custom_notes:\n amp_n = \"&\" * n_levels\n if isinstance(custom_notes, list):\n if not all(isinstance(n, str) for n in custom_notes):\n raise ValueError(\n f\"\"\"Each custom note can only be of string type.\n The following notes:\n {[n for n in custom_notes if type(n) != str]} are of types\n {[type(n) for n in custom_notes if type(n) != str]}\n respectively.\"\"\"\n )\n for n in custom_notes:\n notes_text += \"\"\"\n {}\\\\multicolumn{{{}}}{{r}}\\\\textit{{{}}}\\\\\\\\\\n\"\"\".format(\n amp_n, n_columns, n\n )\n elif isinstance(custom_notes, str):\n notes_text += \"{}\\\\multicolumn{{{}}}{{r}}\\\\textit{{{}}}\\\\\\\\\\n\".format(\n amp_n, n_columns, custom_notes\n )\n else:\n raise TypeError(\n f\"\"\"Custom notes can be either a string or a list of strings.\n Not: {type(custom_notes)}.\"\"\"\n )\n return notes_text", "def genpage_data(data_note):\n return ('**{}**\\n\\n'\n '*{}*\\n\\n'\n 'Source: *{}*\\n\\n'\n '{}\\n').format(\n data_note.name,\n data_note.path,\n render_links(data_note.src),\n render_links(data_note.desc))", "def generate_footer_html(self):\n footer = '<td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n\n if not self.show_footer:\n return footer\n footer += self.generate_observations_html()\n footer += self.generate_r2_html()\n footer += self.generate_r2_adj_html()\n if self.show_residual_std_err:\n footer += self.generate_resid_std_err_html()\n if self.show_f_statistic:\n footer += self.generate_f_statistic_html()\n footer += '<tr><td colspan=\"' + str(self.num_models + 1) + '\" style=\"border-bottom: 1px solid black\"></td></tr>'\n footer += self.generate_notes_html()\n footer += '</table>'\n\n return footer", "def notes_xml(self):\n\n if self.notes == []:\n return ''\n xml = '<Notes>\\n'\n for note in self.notes:\n xml += note\n xml += '</Notes>\\n'\n return xml", "def generate_titlepage(notes):\n # header\n text = '{} total actions have been recorded in this notebook.\\n\\n'.format(\n len(notes))\n # list of links to each action\n for i, n in enumerate(notes):\n text += '{}. 
[{}]({})\\n'.format(\n i+1,\n n.desc.text.split('\\n')[0],\n '{}/{}.md'.format(TEMP_DIR, n.uid))\n return text", "def theory():\n return render_template('theory.html')", "def start_html_file(self):\n with self.file_path_html.open('w') as f:\n f.write(\"\"\"\n <html>\n <head> \n <style>\n body {padding: 0;margin: 0;}\n h2 {margin-left: 10px; margin-right:10px;}\n h3 {margin-left: 10px; margin-right:10px;}\n p {margin-left: 10px; margin-right:10px;}\n div2 {display: inline-block; width: 650px; height: 650px; margin-left: 30px}\n table[data-source='cms-table'] {\n margin: 0 auto;\n border-spacing: 0;\n }\n table[data-source='cms-table'] th,\n table[data-source='cms-table'] td {\n border-bottom: 1px solid var(--grey-100);\n padding: 0.3rem 1.2rem 0.3rem 1.2rem;\n }\n table[data-type='math-table'] th,\n table[data-type='math-table'] td {\n text-align: center;\n }\n table[data-type='number-table'] th,\n table[data-type='number-table'] td {\n text-align: right;\n }\n table[data-type='text-table'] th,\n table[data-type='text-table'] td {\n text-align: left;\n }\n .left-panel {width: 50%;}\n .right-panel {width: 50%;height: 100%;overflow: hidden; position: fixed;}\n .learnosity-iframe {height: 100%;width: 100%;border:10px;background-color:lightgray;}\n .learnosity-content {display:none;}\n .learnosity-form {text-align:center;}\n .learnosity-button {margin-left:auto; margin-right:auto;}\n </style>\n \"\"\" + mathjax_scripts + \"\"\"\n <script src=\"https://www.desmos.com/api/v1.3/calculator.js?apiKey=dcb31709b452b1cf9dc26972add0fda6\"></script>\n </head>\n <body>\n <div class=\"left-panel\", style='display: inline-block; float: left;'>\n \"\"\")", "def tips_and_tricks():\n\n return render_template('tips-and-tricks.html')", "def generate_body_latex(self):\n body = ''\n for cov_name in self.cov_names:\n body += self.generate_cov_rows_latex(cov_name)\n body += ' '\n for _ in range(self.num_models):\n body += '& '\n body += '\\\\\\\\\\n'\n\n return body", "def gen_release_notes() -> str:\n # this is non-portable, so replace if using script with a diff repo\n notes_text = [\"#### Docker Image\\n\\n\", f\"Docker image available: {docker_root}:{args.tag}\\n\\n\"]\n\n stdout, _ = run_cmd([\"git\", \"show\", f\"{args.tag}:docs/releasenotes/README.md\"])\n skip_lines = True\n for line in StringIO(stdout):\n if line.startswith(\"### Highlights\") and skip_lines:\n skip_lines = False\n elif line.startswith(\"## Version\") and skip_lines is False:\n # only include most recent notes\n break\n\n if skip_lines is False:\n notes_text.append(\n line.replace(\n \"./img/\",\n f\"https://gitlab.com/alleles/ella/raw/{args.tag}/docs/releasenotes/img/\",\n )\n )\n return \"\".join(notes_text)", "def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 
97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot", "def script_description():\n\n desc = \"<!doctype html>\\n\\n<html lang=\\\"en\\\">\\n<body>\\n\"\n filename = next((x for x in [\"{}.md\".format(\n __name__), \"README.md\"] if x in os.listdir(script_path())), None)\n if filename:\n with open(os.path.join(script_path(), filename), \"r\", encoding=\"utf-8\") as f:\n try:\n desc += markdown.markdown(f.read(), extensions=[\"tables\"])\n except Exception as e:\n print(e)\n f.close()\n desc += \"\\n<h2>Script Information</h2>\\n<p>\\n<table width=\\\"90%\\\">\\n<tbody>\\n\"\n for x in [\"__version__\", \"__author__\"]:\n desc += \"<tr>\\n<td>{}:</td>\\n<td>{}</td>\\n</tr>\\n\".format(\n x.replace(\"__\", \"\").title(), eval(x))\n desc += \"<tr>\\n<td>{0}:</td>\\n<td><a href=\\\"{1}\\\">{1}</a></td>\\n</tr>\\n\".format(\n \"Website\", __website__)\n desc += \"</tbody>\\n</table>\\n</p>\\n</body>\\n</html>\\n\"\n return desc", "def note_view():\n google_notes = keep.all()\n\n os.system('clear')\n print(consts.YELLOW_BOLD)\n print(fig.renderText('Keep...'))\n\n if len(google_notes) == 0:\n print(consts.RED, end='')\n print('You don\\'t have any notes!'.center(consts.WIDTH))\n # choices = [\n # '✎ Make a New Note ✎',\n # '✎ Make a New List ✎',\n # '⛔ Exit ⛔'\n # ]\n choices = [\n consts.MAKE_NOTE,\n consts.MAKE_LIST,\n consts.EXIT\n ]\n note_list = []\n else:\n global continue_printing_row\n\n note_list = NoteGrid.listify_google_notes(google_notes)\n note_list = NoteGrid.wrap_text(note_list)\n note_list = NoteGrid.add_list_border(note_list)\n NoteGrid.print_grid(note_list, continue_printing_row)\n print('\\n')\n continue_printing_row = True\n # choices = [\n # '✎ Make a New Note ✎',\n # '✎ Make a New List ✎',\n # 'Edit a Note',\n # '⛔ Exit ⛔']\n choices = [\n consts.MAKE_NOTE,\n consts.MAKE_LIST,\n consts.EDIT_NOTE,\n consts.EXIT\n ]\n\n initial_prompt = [\n {\n 'type': 'list',\n 'name': 'options',\n 'message': consts.SELECT_OPTION,\n 'choices': choices\n }]\n initial_selection = prompt(initial_prompt)\n\n if initial_selection.get('options') == consts.MAKE_NOTE:\n make_a_note(note_list)\n elif initial_selection.get('options') == consts.MAKE_LIST:\n make_a_list(note_list)\n elif initial_selection.get('options') == consts.EDIT_NOTE:\n edit_note_selector_view(note_list, google_notes)\n elif initial_selection.get('options') == consts.EXIT:\n raise SystemExit", "def change_note(self):\n for wiz in self:\n sale = wiz.sale_id\n if sale:\n invoice_list = self.env['account.invoice']\n sale.write({\n 'inter_note': wiz.inter_note,\n 'work_note': wiz.work_note,\n 'delivery_note': wiz.delivery_note,\n 'invoice_note': wiz.invoice_note,\n 'customer_order_note': wiz.customer_order_note,\n })\n for invoice_line in sale.invoice_line_ids:\n if not invoice_line.invoice_id in invoice_list:\n invoice_list += invoice_line.invoice_id\n \n invoice_list.write({\n 'note_invoice_header': wiz.invoice_note,\n 'note_invoice_intern': wiz.inter_note,\n })\n picking_rs = sale.delivery_note_ids\n picking_rs.write({\n 'internal_note': wiz.inter_note,\n 'printed_note': wiz.delivery_note,\n })\n \n wiz.modification_mrp(sale, wiz.work_note)\n \n return {'type':'ir.actions.act_window_view_reload'}", "def show_notes(self):\n\t\tprint(\"You have the following to-do notes added: \\n\")\n\t\tfor n, note in enumerate(glob.glob(self.dir_address + '\\\\*.txt')):\n\t\t\ttitle = note.split('\\\\')\n\t\t\ttitle_name = 
title[-1].strip(\".txt\")\n\t\t\tprint(f\"{n+1}. {title_name}\")", "def table():\n return render_template('table.html')", "def to_html(self):\n body = \"\"\"<table>\\n<tr>\\n<th>num</th>\n <th>Reference</th>\n <th>output</th>\n <th>error_type</th>\n <th>local_cer</th>\n <th>distance</th>\n <th>sub</th>\n <th>ins</th>\n <th>del</th></tr><tbody>\"\"\"\n # create header\n for c, t in enumerate(self.multi_alignment_tokens):\n body += t.to_html(c)\n # something else\n # <p> annotation </p>\n body += '\\n</tbody>\\n</table>'\n return body", "def get_notes(self):\n\t\tself.notes = input(\"Notes (Press enter if None): \")", "def create_html_payment_instructions(shop_id: ShopID, creator: User) -> None:\n scope = _build_shop_snippet_scope(shop_id)\n\n language_codes_and_bodies = [\n (\n 'en',\n '''\n<p>Please transfer the total amount to this bank account:</p>\n\n<table class=\"index\" style=\"margin: 0 auto;\">\n <tr>\n <th>Recipient</th>\n <td>&lt;name&gt;</td>\n </tr>\n <tr>\n <th>IBAN</th>\n <td>&lt;IBAN&gt;</td>\n </tr>\n <tr>\n <th>BIC</th>\n <td>&lt;BIC&gt;</td>\n </tr>\n <tr>\n <th>Bank</th>\n <td>&lt;bank&gt;</td>\n </tr>\n <tr>\n <th>Amount</th>\n <td>{{ total_amount }}</td>\n </tr>\n <tr>\n <th>Purpose</th>\n <td>{{ order_number }}</td>\n </tr>\n</table>\n '''.strip(),\n ),\n (\n 'de',\n '''\n<p>Bitte überweise den Gesamtbetrag auf dieses Konto:</p>\n\n<table class=\"index\" style=\"margin: 0 auto;\">\n <tr>\n <th>Zahlungsempfänger</th>\n <td>&lt;Name&gt;</td>\n </tr>\n <tr>\n <th>IBAN</th>\n <td>&lt;IBAN&gt;</td>\n </tr>\n <tr>\n <th>BIC</th>\n <td>&lt;BIC&gt;</td>\n </tr>\n <tr>\n <th>Bank</th>\n <td>&lt;Bank&gt;</td>\n </tr>\n <tr>\n <th>Betrag</th>\n <td>{{ total_amount }}</td>\n </tr>\n <tr>\n <th>Verwendungszweck</th>\n <td>{{ order_number }}</td>\n </tr>\n</table>\n '''.strip(),\n ),\n ]\n\n for language_code, body in language_codes_and_bodies:\n snippet_service.create_snippet(\n scope, 'payment_instructions', language_code, creator, body\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
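For symmetry with the LaTeX sketch above, a brief assumed call of the HTML variant on the same invented frame:

html = _generate_notes_html(
    append_notes=True,
    notes_label="Note:",
    significance_levels=(0.1, 0.05, 0.01),
    custom_notes=None,
    df=df,  # the invented two-column frame from the LaTeX sketch
)
# The result is a <tr> drawing a rule across all columns, followed by a row
# with "Note:" on the left and the legend right-aligned beside it:
# <sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td>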
Convert a statsmodels-like estimation result to an estimagic-like params DataFrame.
def _extract_params_from_sm(model):
    to_concat = []
    params_list = ["params", "pvalues", "bse"]
    for col in params_list:
        to_concat.append(getattr(model, col))
    to_concat.append(model.conf_int())
    params_df = pd.concat(to_concat, axis=1)
    params_df.columns = ["value", "p_value", "standard_error", "ci_lower", "ci_upper"]
    return params_df
[ "def net_parameters_to_dataframe(self, stringify_index=False):\n interactions, values = self.free_parameters, self.parameters.get_value()\n # now put everything in dataframe\n return pd.DataFrame({\n 'interaction': interactions,\n 'value': values\n }).set_index('interaction')", "def generate_pandas_data(fit_results):\n data = {}\n data[\"q\"] = fit_results.q\n for par in fit_results.parameter:\n data[str(par.values)] = fit_results.parameters.loc[par].values\n pd_data_frame = pd.DataFrame(data = data)\n return pd_data_frame", "def rearrange_lmfit_2obj(result):\n arr = np.array([result.params['flux_a'].value,result.params['hlr_a'].value,result.params['e1_a'].value,result.params['e2_a'].value,result.params['x0_a'].value,result.params['y0_a'].value,\n result.params['flux_b'].value,result.params['hlr_b'].value,result.params['e1_b'].value,result.params['e2_b'].value,result.params['x0_b'].value,result.params['y0_b'].value])\n arr = pd.Series(arr,index=['flux_a','hlr_a','e1_a','e2_a','x0_a','y0_a',\n 'flux_b','hlr_b','e1_b','e2_b','x0_b','y0_b'])\n return arr", "def from_fit(result):\n params = result.params\n return {name : mp.gummy(param.value,param.stderr) for name,param in params.items()}", "def Statsmodels_Params(name, results, Explanatory, NumDecimal):\n if name == \"Holt Winter’s Exponential Smoothing\":\n ResultsParams = results.params\n # ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n # for item in range(0, len(Explanatory.columns)):\n # ResultsParams[item+1] = str(ResultsParams[item+1]) + ' ' + str(Explanatory.columns[item])\n\n # ResultsParams[0] = str(ResultsParams[0])\n # ResultsParams = ', '.join(ResultsParams)\n elif \"AR\" in name:\n ResultsParams = results.params\n ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n for item in range(0, len(Explanatory.columns)):\n ResultsParams[item + 1] = str(ResultsParams[item + 1]) + ' ' + str(Explanatory.columns[item])\n\n ResultsParams[0] = str(ResultsParams[0])\n # ResultsParams = ', '.join(ResultsParams)\n\n else:\n ResultsParams = results.params\n ResultsParams = [round(item, NumDecimal) for item in ResultsParams]\n\n for item in range(0, len(Explanatory.columns)):\n\n ResultsParams[item + 1] = str(ResultsParams[item + 1]) + ' ' + str(Explanatory.columns[item])\n\n ResultsParams[0] = str(ResultsParams[0])\n ResultsParams = ', '.join(ResultsParams)\n\n return ResultsParams", "def process(results):\n \n results = results.copy()\n results['hyperparameters'] = results['hyperparameters'].map(ast.literal_eval)\n \n # Sort with best values on top\n results = results.sort_values('score', ascending = False).reset_index(drop = True)\n \n # Create dataframe of hyperparameters\n hyp_df = pd.DataFrame(columns = list(results.loc[0, 'hyperparameters'].keys()))\n\n # Iterate through each set of hyperparameters that were evaluated\n for i, hyp in enumerate(results['hyperparameters']):\n hyp_df = hyp_df.append(pd.DataFrame(hyp, index = [0]), \n ignore_index = True, sort= True)\n \n # Put the iteration and score in the hyperparameter dataframe\n hyp_df['iteration'] = results['iteration']\n hyp_df['score'] = results['score']\n \n return hyp_df", "def save_model_parameters_to_csv(self):\n\n pd.DataFrame(\n [\n self.num_element_filter,\n self.temperature_filter,\n self.ammonia_filter,\n self.ru_filter,\n self.pressure_filter,\n self.sv_filter,\n self.version,\n self.target_columns,\n self.drop_columns,\n self.group_columns,\n self.hold_columns,\n\n ]\n ).to_csv('{}//eval//{}_modelparam.csv'.format(self.svfl, 
self.svnm))", "def make_results(self):\n fitted = self.fitted\n self.results = OrderedDict()\n ## fitting results\n self.results.update(\n nfev = fitted.nfev,\n ndata = fitted.ndata,\n nvarys = fitted.nvarys, # number of varible paramters\n nfree = fitted.nfree, # degree of freem\n chisqr = fitted.chisqr,\n redchi = fitted.redchi,\n aic = fitted.aic,\n bic = fitted.bic)\n params = fitted.params\n pnames = list(params.keys())\n pvalues = OrderedDict()\n for pn in pnames:\n par = params.get(pn)\n pvalues[pn] = [par.value, par.min, par.max, par.vary]\n self.results[\"params\"] = pvalues\n ## confidence intervals\n if hasattr(self, \"ci\") and self.ci is not None:\n ci = self.ci\n ci_values = OrderedDict()\n ci_sigmas = [ \"ci%02d\" % (v[0]*100) for v in ci.get(pnames[0]) ]\n ci_names = sorted(list(set(ci_sigmas)))\n ci_idx = { k: [] for k in ci_names }\n for cn, idx in zip(ci_sigmas, range(len(ci_sigmas))):\n ci_idx[cn].append(idx)\n # parameters ci\n for pn in pnames:\n ci_pv = OrderedDict()\n pv = [ v[1] for v in ci.get(pn) ]\n # best\n pv_best = pv[ ci_idx[\"ci00\"][0] ]\n ci_pv[\"best\"] = pv_best\n # ci of each sigma\n pv2 = [ v-pv_best for v in pv ]\n for cn in ci_names[1:]:\n ci_pv[cn] = [ pv2[idx] for idx in ci_idx[cn] ]\n ci_values[pn] = ci_pv\n self.results[\"ci\"] = ci_values", "def get_params_simanalyze(model):\n df = model.fixed_params_sim\n imsize = int(df[df[\"Name\"] == 'analyze_imsize'][\"Value\"].squeeze())\n parameters_simanalyze = {\"analyze_niter\": int(df[df[\"Name\"] == 'analyze_niter'][\"Value\"].squeeze()),\n \"analyze_imsize\": [imsize, imsize],\n \"analyze_weighting\": df[df[\"Name\"] == 'analyze_weighting'][\"Value\"].squeeze(),\n \"analyze_cell\": df[df[\"Name\"] == 'analyze_cell'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'analyze_cell'][\"Units\"].squeeze(),\n \"analyze_stokes\": df[df[\"Name\"] == 'analyze_stokes'][\"Value\"].squeeze(),\n \"analyze_threshold\": df[df[\"Name\"] == 'analyze_threshold'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'analyze_threshold'][\"Units\"].squeeze()}\n return parameters_simanalyze", "def put_real_and_predicted_values_into_dataframe(self, path=None):\n if path is not None:\n loaded = load_dumped_file('result/svm/2021-07-26_16-01-18/all_result_of_grid_search')\n y_prediction = loaded.predict(self.x_test)\n else:\n y_prediction = self.grid_svr.predict(self.x_test)\n # print(y_prediction)\n y_prediction = self.sc_y.inverse_transform(y_prediction)\n # print(y_prediction)\n # print(self.y_test.shape)\n # print(a)\n y = self.sc_y.inverse_transform(self.y_test.reshape(1, -1))[0]\n real_predicted_values_dataframe = pd.DataFrame({\n 'Real Values': y,\n 'Predicted Values': y_prediction})\n print(\"MSE:\", mean_squared_error(y, y_prediction))\n return real_predicted_values_dataframe", "def _make_df(recipe: MyRecipe) -> pd.DataFrame:\n df = pd.DataFrame()\n res = recipe.res = FitResults(recipe)\n df[\"name\"] = [\"Rw\", \"half_chi2\"] + res.varnames\n df[\"val\"] = [res.rw, res.chi2 / 2] + res.varvals.tolist()\n df[\"std\"] = [0, 0] + res.varunc\n df = df.set_index(\"name\")\n return df", "def results(self):\r\n return pd.Series(\r\n {\r\n \"metric_bo\": getattr(self, \"metric_bo\", None),\r\n \"time_bo\": getattr(self, \"time_bo\", None),\r\n \"metric_train\": getattr(self, \"metric_train\", None),\r\n \"metric_test\": getattr(self, \"metric_test\", None),\r\n \"time_fit\": getattr(self, \"time_fit\", None),\r\n \"mean_bagging\": getattr(self, \"mean_bagging\", None),\r\n \"std_bagging\": getattr(self, \"std_bagging\", None),\r\n 
\"time_bagging\": getattr(self, \"time_bagging\", None),\r\n \"time\": getattr(self, \"time\", None),\r\n },\r\n name=self.name,\r\n )", "def get_params_simobserve(model):\n df = model.fixed_params_sim\n parameters_simobserve = {\"incenter\": df[df[\"Name\"] == 'incenter'][\"Value\"].squeeze() + df[df[\"Name\"] == 'incenter'][\n \"Units\"].squeeze(),\n \"compwidth\": df[df[\"Name\"] == 'compwidth'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'compwidth'][\n \"Units\"].squeeze(),\n \"incell\": df[df[\"Name\"] == 'incell'][\"Value\"].squeeze() + df[df[\"Name\"] == 'incell'][\n \"Units\"].squeeze(),\n \"inwidth\": df[df[\"Name\"] == 'inwidth'][\"Value\"].squeeze() + df[df[\"Name\"] == 'inwidth'][\n \"Units\"].squeeze(),\n \"integration\": df[df[\"Name\"] == 'integration'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'integration'][\n \"Units\"].squeeze(),\n \"totaltime\": df[df[\"Name\"] == 'totaltime'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'totaltime'][\n \"Units\"].squeeze(),\n \"mapsize\": df[df[\"Name\"] == 'mapsize'][\"Value\"].squeeze() +\n df[df[\"Name\"] == 'mapsize'][\n \"Units\"].squeeze(),\n \"thermalnoise\": df[df[\"Name\"] == 'thermalnoise'][\"Value\"].squeeze(),\n \"t_ground\": float(df[df[\"Name\"] == 't_ground'][\"Value\"].squeeze()),\n \"t_sky\": float(df[df[\"Name\"] == 't_sky'][\"Value\"].squeeze()),\n \"leakage\": float(df[df[\"Name\"] == 'leakage'][\"Value\"].squeeze()),\n \"t_seed\": int(df[df[\"Name\"] == 't_seed'][\"Value\"].squeeze()),\n \"t_user_pwv\": float(df[df[\"Name\"] == 't_user_pwv'][\"Value\"].squeeze()),\n \"tau0\": float(df[df[\"Name\"] == 'tau0'][\"Value\"].squeeze())\n }\n return parameters_simobserve", "def res_df(self):\n if not hasattr(self, 'res_dict'):\n print('you must perform the fit first! ...e.g. call performfit()')\n return\n\n vals = self._assignvals(self.res_dict)\n for key, val in self._assignvals(self.res_dict).items():\n vals[key] = vals[key][~self.mask]\n\n resdf = pd.DataFrame(vals, list(chain(*self._orig_index))\n ).groupby(level=0).first()\n\n return resdf", "def convert_to_dataframe(obj):\n from pm4py.objects.conversion.log import converter\n df = converter.apply(obj, variant=converter.Variants.TO_DATA_FRAME)\n return df", "def _to_df(result, comm=False, lut_names=None):\n\n # find out how large the matrix is\n imax = max([max(r) for r in list(result.keys())])\n # create and fill the matrix\n res = np.full((imax+1, imax+1), np.nan)\n for k, v in result.items():\n res[k[::-1]] = v\n res = res.transpose()\n\n if comm:\n i_upper = np.triu_indices(res.shape[0], 1)\n i_lower = np.tril_indices(res.shape[0], -1)\n res[i_lower] = res[i_upper]\n\n if lut_names is not None:\n res = pd.DataFrame(data={lut_names[i]: res[:, i] for i in list(range(max(res.shape)))})\n else:\n res = pd.DataFrame(data={i : res[:, i] for i in list(range(max(res.shape)))})\n res.index = res.columns\n return res", "def get_linear_regression_params(row):\n params = ['normalize']\n params_dict = dict()\n for k in params:\n value = row[k]\n params_dict[k] = value\n return params_dict", "def estimate_params(self):\n # initialize the parameter array\n params = np.zeros(7)\n # iterate at most 10 times\n for i in range(10):\n # detrend data\n data = self._data.astype(float)\n offset = data.min()\n amp = data.max() - offset\n\n # calculate the moments up to second order\n M = moments(data, 2)\n\n # calculate model parameters from the moments\n # https://en.wikipedia.org/wiki/Image_moment# Central_moments\n ybar = M[1, 0] / M[0, 0]\n yvar = M[2, 0] / M[0, 0] - ybar**2\n\n 
xbar = M[0, 1] / M[0, 0]\n xvar = M[0, 2] / M[0, 0] - xbar**2\n\n covar = M[1, 1] / M[0, 0] - xbar * ybar\n\n # place the model parameters in the return array\n params[:3] = amp, xbar, ybar\n params[3] = np.sqrt(np.abs(xvar))\n params[4] = np.sqrt(np.abs(yvar))\n params[5] = covar / np.sqrt(np.abs(xvar * yvar))\n params[6] = offset\n\n if abs(params[5]) < 1:\n # if the rho is valid break the loop.\n break\n\n # save estimate for later use\n self._guess_params = params\n # return parameters to the caller as a `copy`, we don't want them to\n # change the internal state\n return params.copy()", "def linregres2csv(df,csvname=None):\n linres = linregres(df)\n rdf = pd.DataFrame(columns=['Intercept','Slope'])\n rdf.loc['Value']= linres.params[:] # Fill rows\n rdf.loc['SE']= linres.bse[:]\n rdf.loc['StDev'] = linres.bse*np.sqrt(linres.nobs)\n rdf.loc['R2'] = linres.rsquared\n rdf.loc['n'] = linres.nobs\n rdf.loc['P-value'] = linres.pvalues[:]\n rdf = rdf.T # Transpose\n rdf.index.name='Parameter'\n print(rdf)\n try:\n rdf.to_csv(csvname,float_format='%g')\n except:\n pass", "def estimation_table(\n models,\n *,\n return_type=\"dataframe\",\n render_options=None,\n show_col_names=True,\n show_col_groups=None,\n show_index_names=False,\n show_inference=True,\n show_stars=True,\n show_footer=True,\n custom_param_names=None,\n custom_col_names=None,\n custom_col_groups=None,\n custom_index_names=None,\n custom_notes=None,\n confidence_intervals=False,\n significance_levels=(0.1, 0.05, 0.01),\n append_notes=True,\n notes_label=\"Note:\",\n stats_options=None,\n number_format=(\"{0:.3g}\", \"{0:.5f}\", \"{0:.4g}\"),\n add_trailing_zeros=True,\n escape_special_characters=True,\n siunitx_warning=True,\n):\n if not isinstance(models, (tuple, list)):\n raise TypeError(f\"models must be a list or tuple. 
Not: {type(models)}\")\n models = [_process_model(model) for model in models]\n model_names = _get_model_names(models)\n default_col_names, default_col_groups = _get_default_column_names_and_groups(\n model_names\n )\n column_groups = _customize_col_groups(\n default_col_groups=default_col_groups, custom_col_groups=custom_col_groups\n )\n column_names = _customize_col_names(\n default_col_names=default_col_names, custom_col_names=custom_col_names\n )\n show_col_groups = _update_show_col_groups(show_col_groups, column_groups)\n stats_options = _set_default_stats_options(stats_options)\n body, footer = _get_estimation_table_body_and_footer(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n significance_levels,\n stats_options,\n show_col_names,\n show_col_groups,\n show_stars,\n show_inference,\n confidence_intervals,\n number_format,\n add_trailing_zeros,\n )\n\n render_inputs = {\n \"body\": body,\n \"footer\": footer,\n \"render_options\": render_options,\n }\n if return_type == \"render_inputs\":\n out = render_inputs\n elif str(return_type).endswith(\"tex\"):\n out = render_latex(\n **render_inputs,\n show_footer=show_footer,\n append_notes=append_notes,\n notes_label=notes_label,\n significance_levels=significance_levels,\n custom_notes=custom_notes,\n siunitx_warning=siunitx_warning,\n show_index_names=show_index_names,\n show_col_names=show_col_names,\n escape_special_characters=escape_special_characters,\n )\n elif str(return_type).endswith(\"html\"):\n out = render_html(\n **render_inputs,\n show_footer=show_footer,\n append_notes=append_notes,\n notes_label=notes_label,\n custom_notes=custom_notes,\n significance_levels=significance_levels,\n show_index_names=show_index_names,\n show_col_names=show_col_names,\n escape_special_characters=escape_special_characters,\n )\n\n elif return_type == \"dataframe\":\n if show_footer:\n footer.index.names = body.index.names\n out = pd.concat([body.reset_index(), footer.reset_index()]).set_index(\n body.index.names\n )\n else:\n out = body\n else:\n raise ValueError(\n f\"\"\"Value of return type can be either of\n ['data_frame', 'render_inputs','latex' ,'html']\n or a path ending with '.html' or '.tex'. Not: {return_type}.\"\"\"\n )\n\n return_type = Path(return_type)\n if return_type.suffix not in (\".html\", \".tex\"):\n return out\n else:\n return_type.write_text(out)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
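A minimal end-to-end sketch of the conversion above, assuming statsmodels and pandas are installed; the regression data are random and the variable names are made up:

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = sm.add_constant(
    pd.DataFrame(rng.normal(size=(100, 2)), columns=["x1", "x2"])
)
y = x["const"] + 0.5 * x["x1"] - 0.3 * x["x2"] + rng.normal(size=100)

fit = sm.OLS(y, x).fit()
params_df = _extract_params_from_sm(fit)
# One row per coefficient ("const", "x1", "x2") and the columns
# ["value", "p_value", "standard_error", "ci_lower", "ci_upper"].
print(params_df.round(3))

Passing a DataFrame as exog matters here: it makes `params`, `pvalues`, `bse`, and `conf_int()` come back as labeled pandas objects, which is what the `pd.concat` above relies on.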
Apply the number format if the number string is not in scientific notation.
def _format_non_scientific_numbers(number_string, format_string):
    if "e" in number_string:
        out = number_string
    else:
        out = format_string.format(float(number_string))
    return out
[ "def format_scientific(self, number):\n return __format_obj().scientific(number)", "def set_scientific(self, b):\n self._scientific = bool(b)", "def _set_isScientificNotationUsed(self, *args) -> \"bool\" :\n return _core.UnitAndValuePreferences__set_isScientificNotationUsed(self, *args)", "def _get_isScientificNotationUsed(self) -> \"bool\" :\n return _core.UnitAndValuePreferences__get_isScientificNotationUsed(self)", "def scientific_notation(value, error):\n if value is None or error is None:\n return ''\n\n exponent = int('{:e}'.format(value).split('e')[-1])\n exponent_str = ' x E'+str(exponent)\n\n if exponent > 0:\n exponent = 0\n if exponent == 0:\n exponent_str = ''\n\n nr_digits = abs(int('{:e}'.format(error/10**exponent).split('e')[-1]))\n\n value_str = ('{:.'+str(nr_digits)+'f}').format(value/10**exponent)\n error_str = ('{:.'+str(nr_digits)+'f}').format(error/10**exponent)\n\n scientific_notation = ('(' + value_str + \" \" + chr(177) + \" \" +\n error_str + ')' + exponent_str)\n\n return scientific_notation", "def sigfig_sign_and_exp(number, format_str=\"{:3.1e}\"):\n scientific = format_str.format(number)\n pattern = r\"\"\"\n (\\d+[\\.]*\\d*) # number.numbers\n e # literal e\n ([+-])0*(\\d+) # either plus or minus, then exponent\n \"\"\"\n sig = re.match(pattern, scientific, re.VERBOSE)\n return sig.groups()", "def use_scientific_notation(values):\n _min = np.min(values)\n _max = np.max(values)\n _range = abs(_max - _min)\n return not (\n _range > 1e-3 and _range < 1e3 and abs(_min) > 1e-3 and abs(_min) < 1e3 and abs(_max) > 1e-3 and abs(_max) < 1e3\n )", "def column_float_formatter(self, col):\n # maxsize: maximum length of string containing the float value.\n # maxent: maximum number of digits places before decimal point.\n # maxdec: maximum number of digits places after decimal point.\n # maxprec: maximum precision of the column values, sum of maxent and maxdec.\n maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0\n sign = False\n fformat = \"F\"\n\n # Find maximum sized value in the col\n for val in col.str_vals:\n # Skip null values\n if val is None or val == \"\":\n continue\n\n # Find format of the Float string\n fmt = self._split_float_format(val)\n # If value is in Scientific notation\n if fmt[4] is True:\n # if the previous column value was in normal Float format\n # set maxsize, maxprec and maxdec to default.\n if fformat == \"F\":\n maxsize, maxprec, maxdec = 1, 0, 0\n # Designate the column to be in Scientific notation.\n fformat = \"E\"\n else:\n # Move to next column value if\n # current value is not in Scientific notation\n # but the column is designated as such because\n # one of the previous values was.\n if fformat == \"E\":\n continue\n\n if maxsize < fmt[0]:\n maxsize = fmt[0]\n if maxent < fmt[1]:\n maxent = fmt[1]\n if maxdec < fmt[2]:\n maxdec = fmt[2]\n if fmt[3]:\n sign = True\n\n if maxprec < fmt[1] + fmt[2]:\n maxprec = fmt[1] + fmt[2]\n\n if fformat == \"E\":\n # If ``formats`` not passed.\n if getattr(col, \"formatted_width\", None) is None:\n col.formatted_width = maxsize\n if sign:\n col.formatted_width += 1\n # Number of digits after decimal is replaced by the precision\n # for values in Scientific notation, when writing that Format.\n col.fortran_format = fformat + str(col.formatted_width) + \".\" + str(maxprec)\n col.format = str(col.formatted_width) + \".\" + str(maxdec) + \"e\"\n else:\n lead = \"\"\n if (\n getattr(col, \"formatted_width\", None) is None\n ): # If ``formats`` not passed.\n col.formatted_width = maxent + maxdec + 1\n if sign:\n 
col.formatted_width += 1\n elif col.format.startswith(\"0\"):\n # Keep leading zero, if already set in format - primarily for `seconds` columns\n # in coordinates; may need extra case if this is to be also supported with `sign`.\n lead = \"0\"\n col.fortran_format = fformat + str(col.formatted_width) + \".\" + str(maxdec)\n col.format = lead + col.fortran_format[1:] + \"f\"", "def format_number(s):\n #print('im in format number')\n s = str(s)\n\n def split1000(s, sep='.'):\n return s if len(s) <= 3 else split1000(s[:-3], sep) + sep + s[-3:]\n\n s = s.replace('.', ',')\n s = s.split(',')\n d = ''\n if len(s) > 1:\n d = s[1]\n s = s[0]\n simbol = ''\n if len(s) > 2 and s[0] == '-':\n s = s[1:]\n simbol = '-'\n s = split1000(s)\n if d != '':\n d = ',' + d\n #print(simbol + s + d)\n return simbol + s + d", "def num_format(self, name, format):\n self._formats[name] = super().add_format({'num_format': format})", "def non_exp_repr(x, is_int=False):\n\tif is_int:\n\t\ts = repr(int(x))\n\telse:\n\t\ts = repr(float(x))\n\te_loc = s.lower().find('e')\n\tif e_loc == -1:\n\t\treturn s\n\n\tmantissa = s[:e_loc].replace('.', '')\n\texp = int(s[e_loc+1:])\n\n\tassert s[1] == '.' or s[0] == '-' and s[2] == '.', \"Unsupported format\"\t \n\tsign = ''\n\tif mantissa[0] == '-':\n\t\tsign = '-'\n\t\tmantissa = mantissa[1:]\n\n\tdigitsafter = len(mantissa) - 1\t # num digits after the decimal point\n\tif exp >= digitsafter:\n\t\tif is_int:\n\t\t\treturn sign + mantissa + '0'*(exp - digitsafter)\n\t\treturn sign + mantissa + '0' * (exp - digitsafter) + '.0'\n\telif exp <= -1:\n\t\treturn sign + '0.' + '0' * (-exp - 1) + mantissa\n\tip = exp + 1\t\t\t\t\t\t# insertion point\n\tif is_int:\n\t\treturn sign + mantissa[:ip]\n\treturn sign + mantissa[:ip] + '.' + mantissa[ip:]", "def test_handle_number_invalid_format():\n artifacts = types.ColumnArtifacts(\"number\", format=\"unsupported\")\n\n with pytest.raises(exceptions.FeatureNotImplementedError):\n column._handle_number(artifacts=artifacts)", "def clean_number(string):\n comma = string.find(',')\n point = string.find('.')\n if comma > 0 and point > 0:\n if comma < point:\n string = string.replace(',', '')\n else:\n string = string.replace('.', '')\n string = string.replace(',', '.')\n elif comma > 0:\n string = string.replace(',', '.')\n return string", "def str2num(s) :\n try: return int(s)\n except exceptions.ValueError:\n try: return float(s)\n except exceptions.ValueError: return( s )", "def _set_scientificNotationPrecision(self, *args) -> \"bool\" :\n return _core.UnitAndValuePreferences__set_scientificNotationPrecision(self, *args)", "def set_number_format(self, kind=\"float\", *args, **kwargs):\n if kind==\"float\":\n formatter=format.FloatFormatter(*args,**kwargs)\n elif kind==\"int\":\n formatter=format.IntegerFormatter()\n else:\n try:\n (\"{:\"+kind+\"}\").format(0)\n formatter=format.FmtStringFormatter(kind)\n except ValueError:\n raise ValueError(\"unknown format: {}\".format(kind))\n self.change_formatter(formatter)", "def sn_round(number: float) -> float:\n scientific_notation_str = '{:.2e}'.format(number)\n num_str, order_str = scientific_notation_str.split('e')\n rounded_num = round(eval(num_str), 3)\n num_str = str(rounded_num)\n scientific_notation_str = 'e'.join((num_str, order_str))\n return eval('{:.2e}'.format(number))", "def asNumeral(value):", "def test_to_scientific_tuple_exceptions():\n AI(pmisc.number._to_scientific_tuple, \"number\", number=None)\n AI(pmisc.number._to_scientific_tuple, \"number\", number=True)\n 
AI(pmisc.number._to_scientific_tuple, \"number\", number=5 + 3j)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
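A couple of illustrative calls to the formatter above; the format strings follow Python's `str.format` mini-language, and the key point is that scientific-notation strings pass through untouched:

print(_format_non_scientific_numbers("1.5e-05", "{0:.3g}"))   # 1.5e-05 (unchanged)
print(_format_non_scientific_numbers("0.123456", "{0:.3g}"))  # 0.123
print(_format_non_scientific_numbers("12.0", "{0:.3g}"))      # 12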
Get the maximum number of digits after a decimal point in a DataFrame.
def _get_digits_after_decimal(df):
    max_trail = 0
    for c in df.columns:
        try:
            trail_length = (
                (
                    df[c][~df[c].astype("str").str.contains("e")]
                    .astype("str")
                    .str.split(".", expand=True)[1]
                    .astype("str")
                    .replace("None", "")
                )
                .str.len()
                .max()
            )
        except KeyError:
            trail_length = 0
        if trail_length > max_trail:
            max_trail = trail_length
    return max_trail
[ "def _get_min_significant_precision(df: pd.DataFrame):\n\n # Count number of rows\n num_rows = df.shape[0]\n # Get significance of single row, save as string\n row_significance_string = str(1.0 / num_rows)\n # Parse string and count number of leading, significant zeros\n start_index = row_significance_string.index('.') + 1\n num_zeros = 0\n for char in row_significance_string[start_index:]:\n if char == '0':\n num_zeros += 1\n else:\n break\n # Final min precision is number of leading zeros + 2 places of significance\n precision = num_zeros + 2\n\n return precision", "def decimals(self) -> int:\r\n if self.__decimals is None:\r\n decimals = str(self.multiplier)[::-1].find('.')\r\n return 0 if decimals < 0 else decimals\r\n\r\n return self.__decimals", "def find_precision(value):\n decimals = ''\n try:\n stub, decimals = str(value).split('.')\n except ValueError:\n pass\n return len(decimals)", "def _get_precision(np_dtype: Union[Type, str, dtype]) -> int:\n if np.issubdtype(np_dtype, np.floating):\n return np.finfo(np_dtype).precision\n else:\n return np.inf", "def _decimal_place(x):\n if x == 0:\n digits = 0\n else:\n digits = -int(np.log10(abs(x)) // 1)\n return digits", "def determine_col_proportion(df: pd.DataFrame) -> np.ndarray:\n col_size = df.apply(\n lambda x: max(x.astype(\"str\").apply(lambda y: len(y))), axis=0\n ).values\n per_col_size = col_size / sum(col_size)\n return per_col_size", "def max_number_of_records_per_record_set(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"max_number_of_records_per_record_set\")", "def df_size(df):\n total = 0.0\n for col in df:\n total += df[col].nbytes\n return total/1048576.0", "def roundDecimalPlaces(data, maxDP=-1):\n floatMatches = list(re.finditer('\\d+\\.\\d+', data))\n dataList = []\n prevEndIndex = 0\n for floatMatch in floatMatches:\n floatStartI = floatMatch.span()[0]\n floatEndI = floatMatch.span()[1]\n floatStr = data[floatStartI:floatEndI]\n if(maxDP == -1):\n floatStr = str(float(floatStr))\n else:\n floatStr = str(round(float(floatStr) * 10**maxDP) / 10**maxDP)\n dataList.append(data[prevEndIndex:floatStartI])\n dataList.append(floatStr)\n prevEndIndex = floatEndI\n # add remaining data after last float\n lastFloatEndIndex = floatMatches[-1].span()[1]\n dataList.append(data[lastFloatEndIndex:])\n # make in to string\n return ''.join(dataList)", "def get_number_of_digits(number):\n return int(math.log10(number))+1", "def max_length(self) -> float:", "def get_digit(self):\n numeric_cols = self.serie.str.isnumeric()\n numeric_cols = numeric_cols.to_frame()\n return numeric_cols[self.col_name].values.sum()", "def get_decimal_precision(number):\n # Copied from: https://github.com/mahmoud/boltons/pull/59\n assert isinstance(number, decimal.Decimal)\n decimal_tuple = number.normalize().as_tuple()\n if decimal_tuple.exponent >= 0:\n return 0\n return abs(decimal_tuple.exponent)", "def num_length(number: int) -> int:\n return floor(log10(abs(number))) + 1", "def num_of_digits(test_number):\n return int(math.log(test_number,10))+1", "def _maxLengthSize(self):\n return math.ceil(math.log10(self.MAX_LENGTH)) + 1", "def _get_max_value(x):\n return int(max(x.asnumpy()))", "def truncate_like_pd_max_colwidth(x: any) -> str:\n max_colwidth = pd.get_option(\"display.max_colwidth\")\n if max_colwidth is None:\n return x\n else:\n s = str(x)\n if len(s) <= max_colwidth:\n return s\n else:\n return s[:max_colwidth - 3] + '...'", "def get_memory_size(ds: pd.DataFrame) -> float:\n memory_size = ds.memory_usage(index=False).sum()\n if 
memory_size<KB:\n return memory_size\n else:\n return memory_size/KB" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Align integer numbers and non-numeric strings at the center of the model column.
def _center_align_integers_and_non_numeric_strings(sr):
    sr = deepcopy(sr)
    for i in sr.index:
        if _is_integer(sr[i]):
            sr[i] = f"\\multicolumn{{1}}{{c}}{{{str(int(float(sr[i])))}}}"
        else:
            string_without_stars = sr[i].split("$", 1)[0]
            if not string_without_stars.replace(".", "").isnumeric():
                sr[i] = f"\\multicolumn{{1}}{{c}}{{{sr[i]}}}"
    return sr
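A short hedged example of the centering behaviour, assuming `_is_integer` and `deepcopy` are in scope; the series values are illustrative:

import pandas as pd

sr = pd.Series(["12", "0.35$^{*}$", "note"])
out = _center_align_integers_and_non_numeric_strings(sr)
# "12"         -> "\multicolumn{1}{c}{12}"   (integer, centered)
# "0.35$^{*}$" -> unchanged                  (numeric before the first "$")
# "note"       -> "\multicolumn{1}{c}{note}" (non-numeric string, centered)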
[ "def align_decimal(number, width):\n number = unicode(number)\n return figurespace_padding(number, width) + number", "def align_columns(self, alignment):\n for cid, anchor in enumerate(alignment):\n self.Widget.column(cid, anchor=anchor)", "def align_labels(widgets):\n for widget in widgets:\n widget.setAlignment(Qt.AlignRight | Qt.AlignVCenter)", "def calcColWidth(self):", "def autolabel(columns):\r\n for column in columns:\r\n label = str(round(column.get_x(),2))+\" ; \"+str(round(column.get_height(),0))\r\n if round(column.get_height(),0) == 0:\r\n label = round(column.get_x(),2)\r\n ax.annotate('{}'.format(label),\r\n xy=(column.get_x(), column.get_height()),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')", "def format_cell(self, value, width, cell_formating, **text_formating):\n strptrn = \" {:\" + '{:s}{:d}'.format(cell_formating.get('align', '<'), width) + \"s} \"\n strptrn = self.fmt(strptrn, text_formating)\n return strptrn.format(value)", "def format(self, value, width):\n value = ' ' * self.padding_left + value + ' ' * self.padding_right\n alignment = {'left': '<', 'center': '^', 'right': '>'}[self.halign]\n width = width + self.padding_left + self.padding_right\n return '{:{align}{width}s}'.format(value, align=alignment, width=width)", "def align_center_of_mass(self):\n translation = self.get_center_of_mass()\n self.set_position(translation*2.0)\n for el in self.iter_elements(lambda elem: isinstance(elem, SDF.Posable)):\n el.translate(-translation)\n for joint in self.joints:\n joint.translate(-translation)\n return translation", "def display_col():\n title_line = (\"Name\", \"Age\", \"Cost\")\n title_form = \"{:<15}{:^10}{:^15}\"\n\n data = ((\"Ford\", \"25\", \"$75125.99\"),\n (\"Hudson\", \"2\", \"$195\"),\n (\"Flint\", \"139.5\", \"$200099.5\")\n )\n data_form = \"{:<15}{:>10}{:>15}\"\n\n print(title_form.format(*title_line))\n for item in data:\n print(data_form.format(*item))", "def text_align(context, line, available_width, last):\r\n align = line.style.text_align\r\n if align in ('-weasy-start', '-weasy-end'):\r\n if (align == '-weasy-start') ^ (line.style.direction == 'rtl'):\r\n align = 'left'\r\n else:\r\n align = 'right'\r\n if align == 'justify' and last:\r\n align = 'right' if line.style.direction == 'rtl' else 'left'\r\n if align == 'left':\r\n return 0\r\n offset = available_width - line.width\r\n if align == 'justify':\r\n justify_line(context, line, offset)\r\n return 0\r\n if align == 'center':\r\n offset /= 2.\r\n else:\r\n assert align == 'right'\r\n return offset", "def horizontal_alignment(self):\n return self.container['horizontal_alignment']", "def _get_col_width(self):\n attr_name_width = max(len(attr_name) for attr_name in self.attributes)\n attr_val_width = max((len(str(attr_val))\n for tup in self.tuples\n for version in tup\n for attr_val in version), default=0)\n return max(attr_name_width, attr_val_width) + 2 # padding", "def translate_to_cell_center(self):\n if self.cell is None:\n raise NameError(\"cell not defined\")\n else:\n self.translate_to_zero()\n cell_center = (self.cell[0] + self.cell[1] + self.cell[2]) / 2\n self.translate(cell_center)", "def align_data(data):\n spacings = [max([len(seq[i]) for seq in data.values()]) for i in range(len(data[list(data.keys())[0]]))]\n data_aligned = dict()\n # for each entry, create aligned string\n for key, seq in data.items():\n str_aligned = \"\"\n for token, spacing in zip(seq, spacings):\n str_aligned += token + \" \" * (spacing - 
len(token) + 1)\n data_aligned[key] = str_aligned\n return data_aligned", "def _offset(self, row, column):\n return row * 10 * self.width + column * 10", "def printmargins(self):\n\n print 'Margins:'\n wid = self.colwidth\n\n print ''.rjust(wid),\n for col in self.entries:\n print col.rjust(wid),\n print\n\n for row in self.entries:\n print row.rjust(wid),\n for col in self.entries:\n if (col == row):\n val = '`'\n else:\n val = self.margins.get((row,col), '')\n print str(val).rjust(wid),\n print\n print", "def renderCell(self, i, j):\n entry = self.array2d[i][j]\n column_width = self.column_widths[j]\n spaces = column_width - entry.width\n\n if entry.align == 'left':\n cont = entry.text + ' ' * spaces\n elif entry.align == 'right':\n cont = ' ' * spaces + entry.text\n else:\n if spaces % 2 == 0:\n half = spaces // 2\n cont = ' ' * half + entry.text + ' ' * half\n else:\n pref = (spaces - 1) // 2\n post = spaces - pref\n cont = ' ' * pref + entry.text + ' ' * post\n\n return ' %s ' % cont", "def center_offset(text, width):\n if isinstance(text,int):\n return int((width - text) / 2)\n\n return int((width - len(text)) / 2)", "def centerInCell(self):\n x, y = self.pos\n x = int(x) + 0.5\n y = int(y) + 0.5\n self.pos = (x,y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a pandas.Styler object based on the data and styling options.
def _get_updated_styler(
    df, show_index_names, show_col_names, show_col_groups, escape_special_characters
):
    styler = df.style
    if not show_index_names:
        styler = styler.hide(names=True)
    if not show_col_names:
        styler = styler.hide(axis=1)
    if not show_col_groups:
        styler = styler.hide(axis=1, level=0)
    for ax in [0, 1]:
        styler = styler.format_index(escape=escape_special_characters, axis=ax)
    return styler
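A hedged usage sketch, assuming a pandas version (>= 1.4) whose Styler supports hide(names=...) and format_index(escape=...); the frame and option values are illustrative:

import pandas as pd

df = pd.DataFrame({"x": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
styler = _get_updated_styler(
    df,
    show_index_names=False,      # drops the "idx" label from the output
    show_col_names=True,
    show_col_groups=True,
    escape_special_characters="latex",
)
latex_table = styler.to_latex()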
[ "def _pandas_style_to_css(style_type, style, uuid, separator=\"\"):\n declarations = []\n for css_property, css_value in style[\"props\"]:\n declaration = css_property.strip() + \": \" + css_value.strip()\n declarations.append(declaration)\n\n table_selector = \"#T_\" + str(uuid)\n\n # In pandas < 1.1.0\n # translated_style[\"cellstyle\"] has the following shape:\n # [\n # {\n # \"props\": [[\"color\", \" black\"], [\"background-color\", \"orange\"], [\"\", \"\"]],\n # \"selector\": \"row0_col0\"\n # }\n # ...\n # ]\n #\n # In pandas >= 1.1.0\n # translated_style[\"cellstyle\"] has the following shape:\n # [\n # {\n # \"props\": [(\"color\", \" black\"), (\"background-color\", \"orange\"), (\"\", \"\")],\n # \"selectors\": [\"row0_col0\"]\n # }\n # ...\n # ]\n if style_type == \"table_styles\" or (\n style_type == \"cell_style\" and type_util.is_pandas_version_less_than(\"1.1.0\")\n ):\n cell_selectors = [style[\"selector\"]]\n else:\n cell_selectors = style[\"selectors\"]\n\n selectors = []\n for cell_selector in cell_selectors:\n selectors.append(table_selector + separator + cell_selector)\n selector = \", \".join(selectors)\n\n declaration_block = \"; \".join(declarations)\n rule_set = selector + \" { \" + declaration_block + \" }\"\n\n return rule_set", "def data_style_func(df):\n\n def _style_func(r, c):\n if isinstance(df.loc[r, c], (np.int_, float, np.uint)):\n return td_style_to_str(default_numeric_td_style)\n return td_style_to_str(default_td_style)\n\n return _style_func", "def _marshall_styles(proto, styler, styles):\n css_rules = []\n\n if \"table_styles\" in styles:\n table_styles = styles[\"table_styles\"]\n table_styles = _trim_pandas_styles(table_styles)\n for style in table_styles:\n # NB: styles in \"table_styles\" have a space\n # between the UUID and the selector.\n rule = _pandas_style_to_css(\n \"table_styles\", style, styler.uuid, separator=\" \"\n )\n css_rules.append(rule)\n\n if \"cellstyle\" in styles:\n cellstyle = styles[\"cellstyle\"]\n cellstyle = _trim_pandas_styles(cellstyle)\n for style in cellstyle:\n rule = _pandas_style_to_css(\"cell_style\", style, styler.uuid)\n css_rules.append(rule)\n\n if len(css_rules) > 0:\n proto.styler.styles = \"\\n\".join(css_rules)", "def style_df(df):\n\n cell_hover = {\n 'selector': 'td:hover',\n 'props': [('background-color', '#ffffb3')]\n }\n index_names = {\n 'selector': '.index_name',\n 'props': 'font-style: italic; color: black; background-color: white; '\n 'font-weight:bold; border: 0px solid #a4b3dc; text-transform: capitalize; '\n 'text-align:left;'\n }\n headers = {\n 'selector': 'th:not(.index_name)',\n 'props': 'background-color: #DDDDDD; color: black; border: 1px solid #ffffff;'\n }\n center_heading = {\n 'selector': 'th.col_heading',\n 'props': 'text-align: center;'\n }\n left_index = {\n 'selector': '.row_heading',\n 'props': 'text-align: left;'\n }\n td = {\n 'selector': 'td',\n 'props': f'text-align: right; '\n }\n nrow = {\n 'selector': 'tr:nth-child(even)',\n 'props': 'background-color: #F5F5F5;'\n }\n all_styles = [cell_hover, index_names, headers, center_heading, nrow, left_index, td]\n\n fc = lambda x: f'{x:,.3f}' if isinstance(x, (float, int)) else x\n f3 = lambda x: f'{x:.3f}' if isinstance(x, (float, int)) else x\n f5g = lambda x: f'{x:.5g}' if isinstance(x, (float, int)) else x\n # guess sensible defaults\n fmts = {'E[X]': fc,\n 'Est E[X]': fc,\n 'Err E[X]': f5g,\n 'CV(X)': f3,\n 'Est CV(X)': f3,\n 'Err CV(X)': f5g,\n 'Skew(X)': f3,\n 'Est Skew(X)': f3}\n return 
df.style.set_table_styles(all_styles).format(fmts)", "def get_style_defs(self, arg=None):\r\n if arg is None:\r\n arg = ('cssclass' in self.options and '.'+self.cssclass or '')\r\n if isinstance(arg, str):\r\n args = [arg]\r\n else:\r\n args = list(arg)\r\n\r\n def prefix(cls):\r\n if cls:\r\n cls = '.' + cls\r\n tmp = []\r\n for arg in args:\r\n tmp.append((arg and arg + ' ' or '') + cls)\r\n return ', '.join(tmp)\r\n\r\n styles = [(level, ttype, cls, style)\r\n for cls, (style, ttype, level) in self.class2style.items()\r\n if cls and style]\r\n styles.sort()\r\n lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])\r\n for (level, ttype, cls, style) in styles]\r\n if arg and not self.nobackground and \\\r\n self.style.background_color is not None:\r\n text_style = ''\r\n if Text in self.ttype2class:\r\n text_style = ' ' + self.class2style[self.ttype2class[Text]][0]\r\n lines.insert(0, '%s { background: %s;%s }' %\r\n (prefix(''), self.style.background_color, text_style))\r\n if self.style.highlight_color is not None:\r\n lines.insert(0, '%s.hll { background-color: %s }' %\r\n (prefix(''), self.style.highlight_color))\r\n return '\\n'.join(lines)", "def make_style(self, opts=(), **kwargs):\n if len(kwargs) == 0 and len(opts) == 0:\n return lambda text: text\n return lambda text: self.colorize(text, opts, **kwargs)", "def index_style_func(df):\n\n def _style_func(node):\n return td_style_to_str(default_td_style)\n\n return _style_func", "def get_styles(self):\r\n styles = list()\r\n styles.append(html.css(get_resource_path('style.css')))\r\n # This replaces user specified column widths from config file\r\n overrides = list()\r\n cw = ['2em', '3em', 'other']\r\n cwtmp = \"span.col{} {{width: {}}}\"\r\n for i, size in enumerate(cw):\r\n overrides.append(cwtmp.format(i, size))\r\n overrides.pop() # Always ignore last value\r\n styles.append(html.style(\"\\n\".join(overrides)))\r\n return \"\".join(styles)", "def header_style_func(df):\n\n def _style_func(node):\n return td_style_to_str(default_th_style)\n\n return _style_func", "def sq_gui_style(df, table, is_assert=False):\n\n if is_assert:\n if not df.empty:\n return df.style.apply(color_row, axis=1, field='result',\n fieldval=['fail'], bgcolor='darkred',\n color='white')\n else:\n return df\n\n if table == 'bgp' and 'state' in df.columns:\n return df.style.hide_index() \\\n .applymap(color_element_red, fieldval=['Established'],\n subset=pd.IndexSlice[:, ['state']])\n elif table == 'ospf' and 'adjState' in df.columns:\n return df.style.hide_index() \\\n .applymap(color_element_red,\n fieldval=[\"full\", \"passive\"],\n subset=pd.IndexSlice[:, ['adjState']])\n\n elif table == \"routes\" and 'prefix' in df.columns:\n return df.style.hide_index() \\\n .apply(color_row, axis=1, fieldval=['0.0.0.0/0'],\n field='prefix')\n elif table == \"interfaces\" and 'state' in df.columns:\n return df.style.hide_index().apply(ifstate_red, axis=1)\n elif table == \"device\":\n return df.style.hide_index() \\\n .apply(color_row, axis=1, fieldval=['dead', 'neverpoll'],\n field='status', bgcolor='red', color='white')\n else:\n return df.style.hide_index()", "def getStyle(self):", "def index_name_style_func(df):\n\n def _style_func(index_name):\n return td_style_to_str(default_th_style)\n\n return _style_func", "def generate_style(python_style, ui_style):\n\n return merge_styles([python_style, ui_style])", "def _set_style_list(self):\n # list of style choices\n for idx in range(len(STYLE)):\n self.font_style.Append(STYLE[idx], idx)", "def 
init_table_style(self):\n verify_style_label(self.style_label, dict_of_styles)\n self.style = deepcopy(dict_of_styles[self.style_label])\n self.drawable.setStyle(self.style)", "def _get_format_from_style(self, token: Any, style: Any) -> Any:\n result = QtGui.QTextCharFormat()\n #\n # EKR: handle missing tokens.\n try:\n data = style.style_for_token(token).items()\n except KeyError as err:\n key = repr(err)\n if key not in self.key_error_d:\n self.key_error_d[key] = True\n g.trace(err)\n return result\n for key, value in data:\n if value:\n if key == 'color':\n result.setForeground(self._get_brush(value))\n elif key == 'bgcolor':\n result.setBackground(self._get_brush(value))\n elif key == 'bold':\n result.setFontWeight(Weight.Bold)\n elif key == 'italic':\n result.setFontItalic(True)\n elif key == 'underline':\n result.setUnderlineStyle(UnderlineStyle.SingleUnderline)\n elif key == 'sans':\n result.setFontStyleHint(Weight.SansSerif)\n elif key == 'roman':\n result.setFontStyleHint(Weight.Times)\n elif key == 'mono':\n result.setFontStyleHint(Weight.TypeWriter)\n return result", "def make_bar_formats(sns,work,style='candy'):\n\tcolors = dict([(key,brewer2mpl.get_map('Set1','qualitative',9).mpl_colors[val])\n\t\tfor key,val in {\n\t\t'red':0,'blue':1,'green':2,'purple':3,'orange':4,\n\t\t'yellow':5,'brown':6,'pink':7,'grey':8,}.items()])\n\tcolors['pink'] = mpl.colors.ColorConverter().to_rgb(\"#f1948a\")\n\tcolors['beige'] = mpl.colors.ColorConverter().to_rgb(\"#C3C3AA\")\n\t#---combine blue and green to denote the dilute Na,Cal simulation\n\tbgw = 0.3\n\tcolors['bluegreen'] = np.average((colors['blue'],colors['green']),weights=[1-bgw,bgw],axis=0)\n\tbgw2 = 0.6\n\tcolors['bluegreen2'] = np.average((colors['blue'],colors['green']),weights=[1-bgw2,bgw2],axis=0)\n\tif style=='original':\n\t\tout = dict([(sn,{'c':colorize(work.meta[sn],comparison=comparison)}) for sn in sns])\n\telif style=='candy':\n\t\tcolors_ions = {'NA':'green','Na,Cal':'bluegreen','MG':'pink','Cal':'blue','K':'grey',}\n\t\thatches_lipids = {'PI2P':'//','P35P':'-','PIPU':'xx','PIPP':'++','SAPI':''}\n\t\tout = dict([(sn,{\n\t\t\t'c':colors[colors_ions[work.meta[sn]['cation']]],\n\t\t\t'hatch':hatches_lipids[work.meta[sn]['ptdins_resname']]}) for sn in sns])\n\telif style=='actinlink':\n\t\tout = dict([(sn,{\n\t\t\t#---the actinlink plots have custom colors\n\t\t\t'c':colors[sns_explicit_color_names[sn]],\n\t\t\t'hatch':'//' if work.meta[sn].get('cholesterol',False) else ''}) for sn in sns])\n\telse: raise Exception('no bar style: %s'%style)\n\tfor k,v in out.items(): v.update(edgecolor=v['c'])\n\treturn out", "def lazy_style(*args):\n if len(args) == 0:\n raise TypeError(\n \"When applying a style method to a color, the color instance \"\n \"cannot be mutated with the style method - the method can \"\n \"only be used to apply the color and style to a specified \"\n \"argument, which must be provided to the style method.\"\n )\n sty = style(code)\n return sty(self.__call__(args[0]))", "def apply_style(self, value):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a number is an integer (including a float with only zeros after the decimal point).
def _is_integer(num):
    try:
        out = int(float(num)) == float(num)
    except ValueError:
        out = False
    return out
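A few illustrative calls; the ValueError branch covers non-numeric strings:

assert _is_integer("5.0") is True   # float with only zeros after the decimal
assert _is_integer(7) is True
assert _is_integer("5.01") is False
assert _is_integer("abc") is False  # float("abc") raises ValueError, caught above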
[ "def isinteger(value):\n try:\n return value == int(value)\n except TypeError:\n return False", "def _is_number(value):\n if isinstance(value, int) or isinstance(value, float):\n return True\n return False", "def is_number(n):\n try:\n int(n)\n except ValueError:\n return False\n else:\n return True", "def is_int(x):\n return type(x) == int", "def _is_int(val):\n try:\n int(val)\n return True\n except ValueError:\n return False", "def isSimpleNumeric(x):\n \n return ((type(x)==int)or(type(x)==float))", "def _check_intable(f):\n if(_is_int(f)):\n return(int(float(f)))\n else:\n return(float(f))", "def isint(x):\n if isinstance(x, int_types):\n return True\n try:\n x = mpmathify(x)\n except:\n return False\n if isinstance(x, mpf):\n if isnan(x) or isinf(x):\n return False\n return x == int(x)\n return False", "def is_int(a):\n\ttry:\n\t\tint (a)\n\t\treturn True\n\texcept:\n\t\treturn False", "def isInt(cls, value):\n return cls.asInt(value) is not None", "def isnumber(cls, value):\n if isinstance(value, (int, long, float)): # True if value is already a number\n return True\n try:\n float(value) # Test if this can be converted to a number\n return True\n except:\n return False", "def test_is_int_float(self):\n self.assertEqual(is_int('1.01'), False)", "def isNumeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def check_int(x):\n if isinstance(x, int):\n return x\n if not x.isdecimal():\n log.error(\"{} is not a decimal number\".format(x))\n exit(1)\n return int(x)", "def is_numeric(x):\n \n try:\n float(x)\n return True\n except ValueError:\n return False", "def is_integer(self):\n return type(self.value) == int", "def has_no_digits_in_float(value):\n return value == math.floor(value)", "def _check_for_int(x):\n try:\n y = int(x)\n except OverflowError:\n pass\n else:\n # There is no way in AMF0 to distinguish between integers and floats\n if x == x and y == x:\n return y\n\n return x", "def is_any_int(x):\n return isinstance(x, Integral)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of explores in the project, excluding skipped explores.
def count_explores(self) -> int:
    return len([explore for explore in self.iter_explores() if not explore.skipped])
[ "def getUnprofitableCount(self):\n\t\treturn len(self.__losses)", "def get_num_attacks_per_day():", "def n_experiences(self):\n\n return len(self.heap.track)", "def total_projects(self) -> int:\n return Project.objects.filter(client=self).count()", "def num_ignored(self):\n return self._test_stats.ignored", "def remaining_days(self) -> float:\n return self.project_days - self.committed_days", "def committed_days(self) -> float:\n return sum(a.effort for a in RSEAllocation.objects.filter(project=self))", "def GemLimit(self):\n return self._game_rules.max_gems - sum(self._self_state.gem_counts.values())", "def query_exception_count(self):\n return len(re.findall('EXCEPTION',\n self.impalad_test_service.read_debug_webpage('queries')))", "def count_soldiers(self):\n return self.num_soldiers", "def minions_killed(self):\r\n return self.data.totalMinionKills", "def _get_count(self) -> \"size_t\" :\n return _core.DataProjects__get_count(self)", "def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")", "def get_max_hours(context):\r\n progress = context['project_progress']\r\n return max([0] + [max(p['worked'], p['assigned']) for p in progress])", "def count_recovered(model,grid_size):\n list_state = [a for a in model.schedule.agents if (a.state == a.RECOVERED or a.state == a.DEAD)]\n return len(list_state)/grid_size", "def skip_cycles(self) -> int:\n tokens = self._skip_pattern.findall(self.structure)\n return sum((int(re.sub(r'\\D', '', token)) for token in tokens))", "def sup_2048(self):\n count = 0\n for i in range(4):\n for j in range(4):\n if self.grid[i][j] >= 2048:\n count += 1\n self.goal = count\n return self.goal", "def get_num_attempted_catches(self):\n return self._game.count(FLAG)", "def getRetryCount():\n return opencue.cuebot.Cuebot.getConfig().get('cuebot.exception_retries', 3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates Dimension objects for all dimensions in a given explore.
async def build_explore_dimensions(
    client: LookerClient,
    explore: Explore,
    ignore_hidden_fields: bool = False,
) -> None:
    dimensions_json = await client.get_lookml_dimensions(
        explore.model_name, explore.name
    )
    dimensions: List[Dimension] = []
    for dimension_json in dimensions_json:
        dimension: Dimension = Dimension.from_json(
            dimension_json, explore.model_name, explore.name
        )
        if dimension.url is not None:
            dimension.url = client.base_url + dimension.url
        if not dimension.ignore and not (dimension.is_hidden and ignore_hidden_fields):
            dimensions.append(dimension)
    explore.dimensions = dimensions

    if len(explore.dimensions) == 0:
        logger.warning(
            f"Warning: Explore '{explore.name}' does not have any non-ignored "
            "dimensions and will not be validated."
        )
        explore.skipped = SkipReason.NO_DIMENSIONS
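A hypothetical call site, sketched inside an async validation routine; the Explore constructor signature and the client instance are assumptions not shown in this row:

# Hypothetical: client is an authenticated LookerClient instance.
explore = Explore(model_name="ecommerce", name="orders")
await build_explore_dimensions(client, explore, ignore_hidden_fields=True)
if explore.skipped:
    print(f"Explore '{explore.name}' will not be validated")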
[ "def builddimensions(self):\r\n e = self.experiment # synonym\r\n\r\n # find unique dimension values across variables. Dim values could be 0, 5, 5, 5, 2, 666, -74,...\r\n dims = list(np.unique([ var.dim for var in e.variables ])) # np.unique returns sorted values\r\n\r\n # renumber dimension values to be consecutive 0-based\r\n newdims = range(len(dims)) # 0-based consecutive dim values\r\n old2new = dict(zip(dims, newdims)) # maps from old dim values to new ones\r\n for var in e.variables:\r\n var.dim = old2new[var.dim] # overwrite each Variable's old dim value with the new one\r\n\r\n # use newdims to init a list of Dimensions, each with an empty Variables object\r\n self.dimensions = []\r\n for dim in newdims:\r\n d = Dimension(variables=Variables(), dim=dim)\r\n self.dimensions.append(d)\r\n\r\n # now assign each Variable object to the appropriate Dimension object\r\n for var in e.variables:\r\n d = self.dimensions[var.dim] # get the Dimension object\r\n d.variables[var.name] = var # assign the Variable to the Dimension's Variables\r\n d.shuffle = var.shuffle # set the Dimension's shuffle and random flags according to this Variable\r\n d.random = var.random\r\n d.check() # make sure everything is consistent in this Dimension\r", "def _createDimensions(): \n for d in ncin.dimensions:\n dimVar = ncin[d]\n ncout.createDimension(d, dimVar.size)\n data = ncout.createVariable(d, np.dtype('double').char, (d))\n _add_attributes(dimVar, data)\n if d == dimTName:\n data.units = \"Hours since {}-{}-{} {}\".format(year, month, day, timeString)\n if d == dimYName:\n data[:] = dimVar[::-1]\n else:\n data[:] = dimVar[:]", "def create_dimensions():\n m1 = classes.Dimension('Mass_1', 5, 50, sampler.kroupa, prior.kroupa)\n q = classes.Dimension('q', 0.1, 1, sampler.uniform, prior.uniform, should_print = False)\n a = classes.Dimension('Separation', .01, 1000, sampler.flat_in_log, prior.flat_in_log)\n #kick_velocity_random_1 = classes.Dimension('Kick_Velocity_Random_1', 0, 1, sampler.uniform, prior.uniform)\n #kick_theta_1 = classes.Dimension('Kick_Theta_1', -np.pi / 2, np.pi / 2, sampler.uniform_in_cosine, prior.uniform_in_cosine)\n #kick_phi_1 = classes.Dimension('Kick_Phi_1', 0, 2 * np.pi, sampler.uniform, prior.uniform)\n #kick_velocity_random_2 = classes.Dimension('Kick_Velocity_Random_2', 0, 1, sampler.uniform, prior.uniform)\n #kick_theta_2 = classes.Dimension('Kick_Theta_2', -np.pi / 2, np.pi / 2, sampler.uniform_in_cosine, prior.uniform_in_cosine)\n #kick_phi_2 = classes.Dimension('Kick_Phi_2', 0, 2 * np.pi, sampler.uniform, prior.uniform)\n #return [m1, q, a, kick_velocity_random_1, kick_theta_1, kick_phi_1, kick_velocity_random_2, kick_theta_2, kick_phi_2]\n return [m1, q, a]", "def dimension(self, name):\n dim_classes = {\n \"crimes\": Crimes,\n \"regions\": Regions,\n \"periods\": Periods\n }\n if name not in dim_classes.keys():\n raise Exception(\"{} is not a valid dimension. 
Options are {}.\"\\\n .format(name, \",\".join(dim_classes.keys())))\n \n if not self._html:\n self._fetch_html()\n\n if not getattr(self, \"_\" + name):\n dim = dim_classes[name](html=self._html)\n setattr(self, \"_\" + name, dim)\n\n return getattr(self, \"_\" + name)", "def dimensions(self) -> DimensionGraph:\n base = self.universe.empty\n if len(self) == 0:\n return base\n return base.union(*[datasetType.dimensions for datasetType in self.keys()])", "def size(self, abstractdim_rewriter=None):\n dims = self.dimensions\n if abstractdim_rewriter:\n # Only apply it to dims that are still abstract\n def adrw(x):\n if not x.concrete:\n return abstract_dim.AbstractDim(abstractdim_rewriter(str(x)))\n return x\n dims = map(adrw, dims)\n return abstract_dim.list_product(dims)", "def dimensions(self) -> DimensionGraph:\n base = self.universe.empty\n if len(self) == 0:\n return base\n return base.union(*[scaffolding.dimensions for scaffolding in self.values()])", "def dimension(self, form):\n keys = ['x1', 'x2', 'y1', 'y2', 'w', 'h']\n res = {}\n for key in keys:\n res.update(\n {key: form[key]}\n )\n return {form.get('id-name'): res}", "def SetDimensionInformation(dims):", "def create_expense_objects(expenses: List[Dict], workspace_id: int):\n expense_objects = []\n\n for expense in expenses:\n for custom_property_field in expense['custom_properties']:\n if expense['custom_properties'][custom_property_field] == '':\n expense['custom_properties'][custom_property_field] = None\n expense_object, _ = Expense.objects.update_or_create(\n expense_id=expense['id'],\n defaults={\n 'employee_email': expense['employee_email'],\n 'employee_name': expense['employee_name'],\n 'category': expense['category'],\n 'sub_category': expense['sub_category'],\n 'project': expense['project'],\n 'expense_number': expense['expense_number'],\n 'org_id': expense['org_id'],\n 'claim_number': expense['claim_number'],\n 'amount': _round_to_currency_fraction(expense['amount'], expense['currency']),\n 'currency': expense['currency'],\n 'foreign_amount': expense['foreign_amount'],\n 'foreign_currency': expense['foreign_currency'],\n 'tax_amount': expense['tax_amount'],\n 'tax_group_id': expense['tax_group_id'],\n 'settlement_id': expense['settlement_id'],\n 'reimbursable': expense['reimbursable'],\n 'billable': expense['billable'],\n 'state': expense['state'],\n 'vendor': expense['vendor'][:250] if expense['vendor'] else None,\n 'cost_center': expense['cost_center'],\n 'purpose': expense['purpose'],\n 'report_id': expense['report_id'],\n 'report_title': expense['report_title'],\n 'corporate_card_id': expense['corporate_card_id'],\n 'file_ids': expense['file_ids'],\n 'spent_at': expense['spent_at'],\n 'approved_at': expense['approved_at'],\n 'posted_at': expense['posted_at'],\n 'expense_created_at': expense['expense_created_at'],\n 'expense_updated_at': expense['expense_updated_at'],\n 'fund_source': SOURCE_ACCOUNT_MAP[expense['source_account_type']],\n 'verified_at': expense['verified_at'],\n 'custom_properties': expense['custom_properties'],\n 'payment_number': expense['payment_number'],\n },\n )\n\n if not ExpenseGroup.objects.filter(expenses__id=expense_object.id).first():\n expense_objects.append(expense_object)\n\n return expense_objects", "def get_dimensions(self, units):\n return self.id.to(units), self.od.to(units)", "def write_dims(dimensions_dict, nc):\n\n for dimension in list(dimensions_dict.values()):\n dim_size = 0\n if dimension == 'num_characters':\n dim_size = 4\n nc.createDimension(dimension, dim_size)", "def 
explosion_dimension(dim_mini, dim_max, nb_point, nb_carre):\n print('lacement calcul')\n tms, dims = [], []\n \n for dim in range(dim_mini, dim_max):\n print('dimension de calcul : ', dim)\n set_point = creation_point_rectangles_2(nb_point, nb_carre, dim)\n t1 = clock()\n #ht = mv1_algo(set_point, 10 )\n ht = mv1_algo_opti(set_point, nb_carre, distance, 0.2)\n t2 = clock()\n tms.append(t2 - t1)\n dims.append(dim)\n save = open('result_algo_2.txt', 'a')\n save.write('\\n' + str(dim) + ' '+str(t2 - t1))\n print('ecriture ok')\n save.close()\n \n print(tms, dims)\n plt.plot(dims, tms)\n plt.xlabel('Dimension')\n plt.ylabel(' Computing time')\n plt.title(' Evolution time and dimension for n = 5000 et eps = 0.2')\n plt.show()", "def run(self):\n\n # Gets all table objects in the scope\n pygrametl = self.scope['pygrametl']\n tables = pygrametl._alltables\n\n # Creates representation objects\n for table in tables:\n\n # If the table is a dimension.\n if self.check_table_type(table, DIM_CLASSES):\n if isinstance(table, TypeOneSlowlyChangingDimension):\n dim = SCDType1DimRepresentation(table, self.dw_conn)\n elif isinstance(table, SlowlyChangingDimension):\n dim = SCDType2DimRepresentation(table, self.dw_conn)\n else:\n dim = DimRepresentation(table, self.dw_conn)\n self.dim_reps.append(dim)\n\n # If the table is a fact table\n elif self.check_table_type(table, FT_CLASSES):\n ft = FTRepresentation(table, self.dw_conn)\n self.fts_reps.append(ft)\n\n # From the scope, gets all SnowflakedDimensions.\n # These are used to re-create the referencing structure of the DW,\n # when instantiating DWRepresentation.\n snowflakes = []\n for x, value in self.scope.items():\n if isinstance(value, SnowflakedDimension):\n snowflakes.append(value)\n\n dw_rep = DWRepresentation(self.dim_reps, self.dw_conn, self.fts_reps,\n snowflakes)\n\n # Clears the list of tables as its contents may otherwise be retained,\n # when a new Case is executed. 
This is because the list is mutable.\n pygrametl._alltables.clear()\n\n return dw_rep", "def builddimitable(self):\r\n # Can't figure out how to use a recursive generator/function to do this, see Apress Beginning Python p192\r\n # HACK!!: generate and exec the appropriate Python code to build the ordered (unshuffled/unrandomized) dimension index table\r\n dimi = [None]*len(self.dimensions) # stores the index we're currently on in each dimension\r\n self.dimitable = [] # ordered dimension index table, these are indices into the values in dimensions, dimensions are in columns, sweeps are in rows\r\n # generate code with the right number of nested for loops\r\n code = ''\r\n tabs = ''\r\n for dimension in self.dimensions: # generate ndim nested for loops...\r\n i = str(dimension.dim)\r\n code += tabs+'for dimi['+i+'] in range(len(self.dimensions['+i+'])):\\n'\r\n tabs += TAB # add a tab to tabs in preparation for the next for loop, or the innermost part of the last one\r\n code += tabs+'self.dimitable.append(copy(dimi))\\n' # innermost part of the nested for loops, copying dimi is important\r\n exec(code) # run the generated code, this builds the ordered dimitable with all the permutations\r\n '''\r\n # example of what the generated code looks like for 3 dimensions:\r\n for dimi[0] in range(len(self.dimensions[0])):\r\n for dimi[1] in range(len(self.dimensions[1])):\r\n for dimi[2] in range(len(self.dimensions[2])):\r\n self.dimitable.append(copy(dimi))\r\n '''\r\n self.dimitable = np.asarray(self.dimitable)\r\n self.checkdimitable()", "def _build_eval_metrics_store(self, specs):\n\n store = _EvalMetricsStore()\n for spec in specs:\n if not spec.eval_metrics:\n continue\n metric_fn, args = spec.eval_metrics\n store.add_eval_metrics(metric_fn, args)\n return store", "def createDroneDesigns(self):\n for designID, designInfo in self.droneDesigns.iteritems():\n myDesign = self.getDroneDesign(designID, designInfo[1],designInfo[2],designInfo[3],designInfo[0])\n self.droneDesignObjects[designID] = myDesign", "def load_dimensions(self):\r\n dimension_procs = ['usp_LoadDimSchool','usp_LoadDimSchoolYear','usp_LoadDimRace','usp_LoadDimGender']\r\n for proc in dimension_procs:\r\n self.load_data_from_staging(proc_name=proc)\r\n logging.info(\"Loaded dimension tables from staging.\")", "def get_dimension_list(self):\n dim_list = np.zeros(self.get_N_O_I(),dtype=np.int_)\n for i in range(dim_list.size):\n dim_list[i] = self.observables[i].get_dimension()\n return dim_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Init a configuration flow.
def init_config_flow(hass):
    config_flow.register_flow_implementation(
        hass,
        DOMAIN,
        client_id="id",
        client_secret="secret",
        api_key="123",
        redirect_uri="http://example.com",
        sensors=None,
    )
    flow = config_flow.LogiCircleFlowHandler()
    flow._get_authorization_url = Mock(return_value="http://example.com")
    flow.hass = hass
    return flow
[ "def init(args):\n Configuration.load_config(vars(args).get(\"config\"))", "def __init__(self, config='config.json'):\n self.read_config(config)", "def __init__(self):\n\n self.config = {\n 'debug': False,\n 'enable': False,\n 'secret': '',\n 'timeout': 120,\n 'delay': 3,\n 'drift_backward': 1,\n 'drift_forward': 1,\n }\n self.config_path = os.path.join(os.environ['HOME'], '.ssh', 'otp')\n self.load()", "def __init__(self):\n\n\t\t# create ConfigParser() obj\n\t\tself.config = ConfigParser.ConfigParser()", "def initialize_config(self):\n\n def _logic(utterance: str) -> bool:\n \"\"\"\n Logic to be used by the logic-micromodel.\n \"\"\"\n return \"test\" in utterance.lower()\n\n configs = [\n {\n \"model_type\": \"svm\",\n \"name\": \"test_svm\",\n \"model_path\": os.path.join(self.model_path, \"test_svm\"),\n \"setup_args\": {\n \"training_data_path\": os.path.join(\n self.data_path, \"dog_vs_cat.json\"\n ),\n },\n },\n {\n \"model_type\": \"logic\",\n \"name\": \"test_logic\",\n \"model_path\": os.path.join(self.model_path, \"test_logic\"),\n \"setup_args\": {\"logic_func\": _logic},\n },\n {\n \"model_type\": \"bert_query\",\n \"name\": \"test_bert_query\",\n \"model_path\": os.path.join(self.model_path, \"test_bert_query\"),\n \"setup_args\": {\n \"threshold\": 0.8,\n \"seed\": [\n \"This is a test\",\n \"Arya is a hungry cat.\",\n ],\n \"infer_config\": {\n \"k\": 2,\n \"segment_config\": {\"window_size\": 5, \"step_size\": 3},\n },\n },\n },\n ]\n return configs", "def init_configuration(self):\n\n self.app.config.from_envvar('SETTINGS')", "def __init__(self, config):\n self.pipeline_config=config", "def __init__(self, *args, **kwargs):\n super(ExternalConfigurator, self).__init__(*args, **kwargs)\n\n # Set up reverter\n self.reverter = reverter.Reverter(self.config)\n self.reverter.recovery_routine()", "def __init__(self):\n raise ValueError('Config class can not be instantiated')", "def initialize():\n nonlocal current_state_id\n global strategy_dict\n if 'init_contract' not in options: # if init_contract i\n warnings.warn('Using most recently attempted strategy!')\n else:\n init_state, guart = options['init_contract']\n failures = options['init_fail']\n fails.append(failures)\n assm = contract_controller.to_assumption(init_state, failures)\n synthesize_contract(assm, guart)\n subprocess.run([parent_path + '/run', 'resyn'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # synthesize strategy online\n # load initial strategy\n load_strategy()\n if 'init_state' not in options: # if the initial state is not specified, then it will be randomly chosen\n current_state_id = np.random.choice(tuple(strategy_dict))\n else:\n current_state_id = look_up_state_id(options['init_state'])\n collect()", "def init(self):\n\n self.session.run(self.init_op)", "def __init__(self):\n DiffusionConfig.__init__(self)\n self.dipy_recon_config = Dipy_recon_configUI(\n imaging_model=self.diffusion_imaging_model,\n recon_mode=self.diffusion_model,\n tracking_processing_tool=self.tracking_processing_tool,\n )\n self.mrtrix_recon_config = MRtrix_recon_configUI(\n imaging_model=self.diffusion_imaging_model, recon_mode=self.diffusion_model\n )\n self.dipy_tracking_config = Dipy_tracking_configUI(\n imaging_model=self.diffusion_imaging_model,\n tracking_mode=self.diffusion_model,\n SD=self.mrtrix_recon_config.local_model,\n )\n self.mrtrix_tracking_config = MRtrix_tracking_configUI(\n tracking_mode=self.diffusion_model, SD=self.mrtrix_recon_config.local_model\n )\n\n self.mrtrix_recon_config.on_trait_change(\n 
self.update_mrtrix_tracking_SD, \"local_model\"\n )\n self.dipy_recon_config.on_trait_change(\n self.update_dipy_tracking_SD, \"local_model\"\n )\n self.dipy_recon_config.on_trait_change(\n self.update_dipy_tracking_sh_order, \"lmax_order\"\n )", "def nfp_module_init(sc, conf):\n\n # Create configurator module and de-multiplexer objects\n try:\n cm = get_configurator_module_instance(sc, conf)\n demuxer_instance = demuxer.ServiceAgentDemuxer()\n except Exception as err:\n msg = (\"Failed to initialize configurator de-multiplexer. %s.\"\n % (str(err).capitalize()))\n LOG.error(msg)\n raise Exception(err)\n else:\n msg = (\"Initialized configurator de-multiplexer.\")\n LOG.info(msg)\n\n # Initialize all the pre-loaded service agents\n try:\n cm.init_service_agents(sc, conf)\n except Exception as err:\n msg = (\"Failed to initialize configurator agent modules. %s.\"\n % (str(err).capitalize()))\n LOG.error(msg)\n raise Exception(err)\n else:\n msg = (\"Initialized configurator agents.\")\n LOG.info(msg)\n\n # Initialize RPC client for receiving messages from REST server\n try:\n init_rpc(sc, cm, conf, demuxer_instance)\n except Exception as err:\n msg = (\"Failed to initialize configurator RPC with topic %s. %s.\"\n % (const.CONFIGURATOR_RPC_TOPIC, str(err).capitalize()))\n LOG.error(msg)\n raise Exception(err)\n else:\n msg = (\"Initialized configurator RPC with topic %s.\"\n % const.CONFIGURATOR_RPC_TOPIC)\n LOG.debug(msg)", "def populate_initial_config(self):\n try:\n with openstack.OpenStack() as client:\n self._populate_system_config(client)\n self._populate_load_config(client)\n self._populate_network_config(client)\n if self.kubernetes:\n self._populate_dns_config(client)\n self._populate_docker_config(client)\n controller = self._populate_controller_config(client)\n # ceph_mon config requires controller host to be created\n self._inventory_config_complete_wait(client, controller)\n self._populate_interface_config(client, controller)\n self._populate_default_storage_backend(client, controller)\n\n except (KeystoneFail, SysInvFail) as e:\n LOG.exception(e)\n raise ConfigFail(\"Failed to provision initial system \"\n \"configuration\")", "def load_config(self):\n # Open the file at default lcoation, unless something else\n # is passed in instead\n self.logger.info('Running load_config() for HerdClient')\n if self.config is not None:\n self.logger.debug(\"There's a config file passed in\")\n f = file(self.config)\n self.cfg = Config(f)\n \n # Allow parameters passed on the command line to override the\n # config file\n if self.seed is None:\n self.logger.debug(\"There's no seed passed in\")\n self.seed = self.cfg.management.seed", "def init(config):\n from tiddlyweb.util import merge_config\n from tiddlywebplugins.lazy.config import config as lconfig\n\n merge_config(config, lconfig)", "def __init__(self, path, config, globalGenerator):\n self.path = path\n self.globalGenerator = globalGenerator", "def _configure(self):\n # Setup command line parser.\n argparser = argparse.ArgumentParser(description = self._description)\n argparser.add_argument('--config-file', help = 'name of the config file')\n argparser.add_argument('--inventory', help = 'name of the inventory file')\n argparser.add_argument('--group', help = 'name of the Ansible host group')\n argparser.add_argument('--fact-dir', help = 'name of the fact cache directory')\n argparser.add_argument('--ascii', help = 'print only ASCII characters (flag)', action = 'store_true', default = None)\n argparser.add_argument('--refresh', help = 
'force host fact refresh (flag)', action = 'store_true', default = None)\n\n # Process command line arguments.\n self._config_cli = vars(argparser.parse_args())\n\n # IMPORTANT! Immediatelly rewrite the default value for configuration file\n # name, if the new value was received as command line argument.\n if not self._config_cli['config_file'] == None:\n self.config['config_file'] = self._config_cli['config_file']\n\n # Load configurations from external file.\n self._config_file = self.json_load(self.config.get('config_file'))\n\n # Merge all configurations together.\n self.config.update((k, v) for k, v in self._config_file.items() if v is not None)\n self.config.update((k, v) for k, v in self._config_cli.items() if v is not None)", "def __init__(self):\n path = os.environ.get(\n \"KEDRO_LOGGING_CONFIG\", Path(__file__).parent / \"default_logging.yml\"\n )\n logging_config = Path(path).read_text(encoding=\"utf-8\")\n self.configure(yaml.safe_load(logging_config))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test we prompt user to follow link if previously prompted.
async def test_we_reprompt_user_to_follow_link(hass: HomeAssistant) -> None:
    flow = init_config_flow(hass)

    result = await flow.async_step_auth("dummy")
    assert result["errors"]["base"] == "follow_link"
[ "def testNewUserContinueUrl(self):\n response = self.request_fetcher.get('/')\n m = re.search(r'<A HREF=\"(/settings[^\"]*)\">', response.body)\n continue_url = m.group(1)\n\n settings_response = self.request_fetcher.get(continue_url)\n self.assertIn('name=\"redirect_to\" value=\"snippet_entry\"',\n settings_response.body)\n\n # Now kinda-simulate clicking on the submit button\n done_response = self.request_fetcher.get(\n '/update_settings?u=user@example.com&redirect_to=snippet_entry')\n if done_response.status_int in (301, 302, 303, 304):\n done_response = done_response.follow()\n self.assertIn('Snippets for user@example.com', done_response.body)", "def test_user_check_following(self):\n pass", "def prompt_and_redirect(self, prompt_cotent, redirect_url=None):\n if redirect_url == None:\n redirect_url = self.request.headers.get('Referer', \"/\")\n \n self.render(\"common/prompt.html\", prompt=prompt_cotent, \n redirect=redirect_url)", "def test_profile_link(self) -> None:\n result: bool = False\n while not result:\n try:\n self.base.header.profile_link.is_displayed()\n result = True\n except StaleElementReferenceException:\n self.base.driver.refresh()\n\n link_text = self.base.header.profile_link.get_attribute('href')\n username: Optional[str] = os.getenv('DEFAULT_USERNAME')\n\n assert username in link_text", "def toggle_follow(self):\n self.__send_command(CommandsBytes.TOGGLE_FOLLOW)\n result = self.__receive_string()\n if result != \"ok\":\n print(\"Error toggling follow\")", "def test_user_current_check_following(self):\n pass", "def follow_state_changed(self, link, **kwargs):", "def test_follow_requires_a_username(self):\n self.instance.follow(None)\n\n assert self.session.put.called is False", "def test_follow(self):\n self.assert_requires_auth(self.instance.follow, \"foo\")", "def test_user_current_put_follow(self):\n pass", "def step_see_prompt(context):\n context.cli.expect('wharfee> ')", "def follow(self, link, spider):\n raise NotImplementedError()", "def links_to_follow(href):\n return href and href.startswith(\"/wiki/\") and \":\" not in href", "def test_frontend_follow_unfollow_button(self):\n # Create a user\n new_user = User.objects.create_user(username=\"1\", password=\"1\")\n\n # Login user\n self.login_quick()\n # Get to self.user profile page\n self.browser.get(self.live_server_url + f\"/profile/{new_user.username}\")\n\n # Try to follow new_user and check if it works\n follow_button = self.browser.find_element_by_name(\"follow\")\n follow_button.click()\n time.sleep(0.1)\n self.assertEqual(UserFollowing.objects.filter(user_id=self.user, following_user_id=new_user).count(), 1)\n\n # Try to unfollow new_user and check it works\n unfollow_button = self.browser.find_element_by_name(\"unfollow\")\n unfollow_button.click()\n time.sleep(0.1)\n self.assertEqual(UserFollowing.objects.filter(user_id=self.user, following_user_id=new_user).count(), 0)", "def test_omnipresent_links(self):\n content = self.client.get(reverse('rango:index')).content.decode()\n self.assertTrue('href=\"/rango/about/\"' in content)\n self.assertTrue('href=\"/rango/\"' in content)\n\n user_object = create_user_object()\n self.client.login(username='testuser', password='testabc123')\n\n # These should be present.\n content = self.client.get(reverse('rango:index')).content.decode()\n self.assertTrue('href=\"/rango/about/\"' in content, f\"{FAILURE_HEADER}Please check the links in your base.html have been updated correctly to change when users log in and out.{FAILURE_FOOTER}\")\n 
self.assertTrue('href=\"/rango/\"' in content, f\"{FAILURE_HEADER}Please check the links in your base.html have been updated correctly to change when users log in and out.{FAILURE_FOOTER}\")", "def already_following():\n\n user_id = request.args['user_id']\n follower_id = request.args['follower_id']\n\n return(str(is_following(user_id, follower_id)))", "def _getUserConfirmation(self, message=\"Proceed anyways? (y) or (n): \"):\n proceed = ''\n while not proceed:\n proceed = input(message)\n if len(proceed) and not proceed[0] in ['Y', 'y', 'N', 'n']:\n proceed = ''\n print(\"Please enter 'y' or 'n': \", end='')\n elif len(proceed) and proceed[0].lower() == 'y':\n return True\n elif len(proceed) and proceed[0].lower() == 'n':\n return False", "def follow_back(self, splitter, item, user):\n if self.social:\n return item == user or item in splitter.train_set.keys() and user in splitter.train_set[item]\n else:\n return False", "def test_non_setup_url_redirects_to_setup(self):\n self.client.login(username='john', password='foo')\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 302)\n self.assertEqual(\n urlsplit(response.url).path,\n self.client_user.setup_step_url\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test we bypass picking implementation if we have one flow_imp.
async def test_not_pick_implementation_if_only_one(hass: HomeAssistant) -> None:
    flow = init_config_flow(hass)

    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.FlowResultType.FORM
    assert result["step_id"] == "auth"
[ "def HasIMP(self):\n return self.__has('IMP')", "def test_case_pocket_insane_none(self):\n\t\ttheResult = True\n\t\ttry:\n\t\t\tfrom .context import piaplib\n\t\t\tif piaplib.__name__ is None:\n\t\t\t\ttheResult = False\n\t\t\tfrom piaplib import pocket\n\t\t\tif pocket.__name__ is None:\n\t\t\t\ttheResult = False\n\t\t\tfrom piaplib import pocket as pocket\n\t\t\tif pocket.__name__ is None:\n\t\t\t\traise ImportError(\"Failed to import pocket\")\n\t\t\tself.assertIsNone(pocket.useTool(\"NoSuchTool\"))\n\t\t\tself.assertIsNone(pocket.useTool(None))\n\t\texcept Exception as err:\n\t\t\tprint(str(\"\"))\n\t\t\tprint(str(type(err)))\n\t\t\tprint(str(err))\n\t\t\tprint(str((err.args)))\n\t\t\tprint(str(\"\"))\n\t\t\terr = None\n\t\t\tdel err\n\t\t\ttheResult = False\n\t\tassert theResult", "def _is_feed_forward(self, op):\n return len(op.measurement_deps) != 0", "def skip_if_flow(func):\n @wraps(func)\n def wrapped_func(self, test, *args, **kwargs):\n if isinstance(test, TestFlow):\n return\n\n return func(self, test, *args, **kwargs)\n\n return wrapped_func", "def _checkRoundTrip(self, obj):\r\n tripped = reflect.namedAny(reflect.qual(obj))\r\n if tripped is not obj:\r\n raise RuntimeError(\"importing %r is not the same as %r\" %\r\n (reflect.qual(obj), obj))", "def is_passthrough(self, op):\n op_handler = self._op_handler_dict[op.type]\n return op_handler.is_passthrough", "def fleurinpgen_needed(self):\n return self.ctx.run_inpgen", "async def test_onboarding_not_supported(hass: HomeAssistant) -> None:\n with patch(MODULE, return_value=None):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": \"onboarding\"},\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"no_devices_found\"", "def test_i_decide_not_to_buy_the_product():", "def test_zope38(self):\r\n with SetAsideModule(\"zope\"):\r\n self.install((3, 8))\r\n try:\r\n from zope import interface\r\n except Exception as exc:\r\n self.assertEqual(\r\n \"zope.interface.exceptions.InvalidInterface: \"\r\n \"Concrete attribute, __qualname__\",\r\n str(exc))\r\n else:\r\n self.fail(\r\n \"InvalidInterface was not raised by zope.interface import\")", "def test_edge_case_A_inv_none():\n with pytest.raises(AssertionError):\n amplify(A, None, oracle, qubits, iters)", "def _test_lti_get_resource_other_pl_pl_auto_portable_ready_to_show(\n self, factory, model\n ):\n playlist = factories.PlaylistFactory(consumer_site__domain=\"example.com\")\n passport = factories.ConsumerSiteLTIPassportFactory(\n consumer_site=playlist.consumer_site\n )\n resource = factory(\n playlist__is_portable_to_playlist=False,\n playlist__is_portable_to_consumer_site=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n # Add automatic portability from the playlist of the video to the requested playlist\n models.PlaylistPortability.objects.create(\n source_playlist=resource.playlist, target_playlist=playlist\n )\n\n data = {\n \"resource_link_id\": resource.lti_id,\n \"context_id\": playlist.lti_id,\n \"roles\": random.choice([\"Student\", \"Instructor\"]),\n \"oauth_consumer_key\": passport.oauth_consumer_key,\n }\n request = self.factory.post(\"/\", data, HTTP_REFERER=\"https://example.com/route\")\n lti = LTI(request, resource.pk)\n lti.verify()\n retrieved_resource = get_or_create_resource(model, lti)\n self.assertIsInstance(retrieved_resource, model)\n self.assertEqual(retrieved_resource, resource)\n\n # No new playlist or resource are 
created\n self.assertEqual(models.Playlist.objects.count(), 2)\n self.assertEqual(model.objects.count(), 1)", "def has_import_star(self, ):\n\t\tpass", "def _detect_pixelbased_models(self):\n source_model_list = self.SourceModel.profile_type_list\n if 'SLIT_STARLETS' in source_model_list or 'SLIT_STARLETS_GEN2' in source_model_list:\n if len(source_model_list) > 1:\n raise ValueError(\"'SLIT_STARLETS' or 'SLIT_STARLETS_GEN2' must be the only source model list for pixel-based modelling\")\n return True\n return False", "def proxy(fb: pyuavcan.transport.Feedback) -> None:\n if inferior_session not in self._inferiors:\n _logger.warning(\n \"%s got unexpected feedback %s from %s which is not a registered inferior. \"\n \"The transport or its underlying software or hardware are probably misbehaving, \"\n \"or this inferior has just been removed.\",\n self,\n fb,\n inferior_session,\n )\n return\n\n handler = self._feedback_handler\n if handler is not None:\n new_fb = RedundantFeedback(fb, inferior_session)\n try:\n handler(new_fb)\n except Exception as ex:\n _logger.exception(\"%s: Unhandled exception in the feedback handler %s: %s\", self, handler, ex)\n else:\n _logger.debug(\"%s ignoring unattended feedback %r from %r\", self, fb, inferior_session)", "def test_enotimp(self):\n self.assertIs(self.exceptionForCode(ENOTIMP), DNSNotImplementedError)", "def test_set_interface_fails_if_not_instance():\n e = Experiment()\n answer = 42\n e.interface = answer", "def test_implementsIntegrator(self):\n integrator = ElasticityExplicitLgDeform()\n from pylith.feassemble.Integrator import implementsIntegrator\n self.failUnless(implementsIntegrator(integrator))\n return", "def test_one_switch_oversubscribe(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test generating authorize URL from Logi Circle API.
async def test_gen_auth_url(hass: HomeAssistant, mock_logi_circle) -> None:
    config_flow.register_flow_implementation(
        hass,
        "test-auth-url",
        client_id="id",
        client_secret="secret",
        api_key="123",
        redirect_uri="http://example.com",
        sensors=None,
    )
    flow = config_flow.LogiCircleFlowHandler()
    flow.hass = hass
    flow.flow_impl = "test-auth-url"
    await async_setup_component(hass, "http", {})

    result = flow._get_authorization_url()
    assert result == "http://authorize.url"
[ "def make_authorization_url(request):\n api_url = \"https://ssl.reddit.com/api/v1/authorize?\"\n request.session[\"oauth_reddit_state\"] = str(uuid4())\n params = urlencode(\n {\n \"client_id\": settings.OAUTH_REDDIT_CLIENT_ID,\n \"response_type\": \"code\",\n \"state\": request.session[\"oauth_reddit_state\"],\n \"redirect_uri\": settings.OAUTH_REDDIT_REDIRECT_URI,\n \"duration\": settings.OAUTH_REDDIT_DURATION,\n \"scope\": settings.OAUTH_REDDIT_SCOPE,\n }\n )\n\n return api_url + params", "def get_authorization_url(self, callback_url, **kwargs):", "def get_auth_url(self):\n url = urljoin(UIT_API_URL, 'authorize')\n\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'state': self.session_id,\n 'scope': self.scope\n }\n\n return url + '?' + urlencode(data)", "def create_login_url():\n\n params = {\n 'response_type' : 'code',\n 'redirect_uri' : constants.OAUTH_CALLBACK_URL,\n 'client_id' : constants.CLIENT_ID,\n 'scope' : 'read:user',\n 'district_id' : constants.DISTRICT_ID\n }\n\n return redirect(constants.AUTHORIZE_REQUEST_URL +\n '?' + urllib.urlencode(params))", "def test_authorization_show(self):\n pass", "def test_api_v1_authenticate_identity_redirect_url_get(self):\n pass", "def test_authorize_door_granted():\n result = requests.get(API_ENTRY_authorize_door.format(\"CB06.01.01\", \"729\")).content\n assert result == \"1\"", "def authorization_url(self):\n\n auth_url = self.openid_config[\"authorization_endpoint\"]\n if self.config.MICROSOFT_AUTH_LOGIN_TYPE == LOGIN_TYPE_XBL:\n auth_url = self._xbox_authorization_url\n\n extra_parameters = self.config.MICROSOFT_AUTH_EXTRA_PARAMETERS\n extra_parameters[\"response_mode\"] = \"form_post\"\n built_auth_url = super().authorization_url(auth_url, **extra_parameters)\n return built_auth_url", "def test_build_uri(self):\n iiq = insightiq_api.InsightiqApi(username='pat', password='a')\n\n value = iiq._build_uri('someEndpoint')\n expected = 'https://localhost/someEndpoint'\n\n self.assertEqual(value, expected)", "def generate_access_code(test=False):\n reddit = get_reddit()\n url = reddit.get_authorize_url('uniqueKey', properties.SCOPES, True)\n if test:\n return url\n else:\n webbrowser.open(url)", "async def test_callback_view_accepts_code(\n hass: HomeAssistant, mock_logi_circle\n) -> None:\n init_config_flow(hass)\n view = LogiCircleAuthCallbackView()\n\n resp = await view.get(MockRequest(hass, {\"code\": \"456\"}))\n assert resp.status == HTTPStatus.OK\n\n await hass.async_block_till_done()\n mock_logi_circle.authorize.assert_called_with(\"456\")", "def oauth():\n print _get_rand_hash()\n print _get_rand_hash()", "def oauth():\r\n print _get_rand_hash()\r\n print _get_rand_hash()", "def distantAuthCall ( api_request=None, query={}, payload={}, func_name='user_login') :\n\n print (\". \"*50)\n log.debug(\"distantAuthCall/ payload : \\n%s\", pformat(payload) )\n log.debug(\"distantAuthCall/ log_type : %s\", func_name )\n\n ### retrieve distant auth url root\n auth_url_root = getDistantAuthUrl()\n log.debug(\"distantAuthCall/ auth_url_root : %s\", auth_url_root )\n\n ### retrieve distant auth endpoint config\n endpoint_config = getDistantEndpointconfig(func_name)\n log.debug(\"distantAuthCall/ endpoint_config : \\n%s\", pformat(endpoint_config) )\n \n url = endpoint_config[\"url\"]\n method = endpoint_config[\"method\"]\n url_args = endpoint_config[\"url_args\"]\n post_args = endpoint_config[\"post_args\"]\n url_append = endpoint_config[\"url_append\"]\n resp_path = endpoint_config[\"resp_path\"]\n\n\n ### build url base for specific auth\n base_url = auth_url_root + url \n log.debug(\"distantAuthCall/ base_url : %s\", base_url )\n\n\n\n\n ### TO DO : append url_append value\n # get param from request\n log.debug(\"distantAuthCall / url_append : %s\", url_append )\n if url_append : \n # log.debug(\"distantAuthCall / api_request : \\n%s\", pformat(api_request.__dict__) )\n url_append_string = \"\"\n url_append_list = []\n view_args = api_request.view_args\n log.debug(\"distantAuthCall / view_args : \\n%s\", pformat(view_args) )\n for append_arg in url_append : \n append_val = view_args[append_arg]\n url_append_list.append(append_val)\n url_append_string = \"/\".join(url_append_list)\n base_url += url_append_string\n\n\n\n\n\n \n\n ### append distant auth request headers\n headers = app.config[\"AUTH_URL_HEADERS\"]\n if payload :\n headers = app.config[\"AUTH_URL_HEADERS_PAYLOAD\"]\n\n ### TO DO : add token to requests in headers or query_string\n token = getTokenFromRequest(api_request)\n log.debug(\"token : %s\", token )\n\n token_query_string = \"\"\n\n if token :\n token_locations = app.config[\"AUTH_URL_TOKEN_LOCATION\"]\n \n if \"query_string\" in token_locations and \"headers\" not in token_locations : \n token_query_string_name = app.config[\"AUTH_URL_TOKEN_QUERY_STRING_NAME\"]\n token_query_string = \"{}={}\".format(token_query_string_name,token)\n\n if \"headers\" in token_locations : \n token_header_name = app.config[\"AUTH_URL_TOKEN_HEADER_NAME\"]\n token_header_type = app.config[\"AUTH_URL_TOKEN_HEADER_TYPE\"]\n headers[token_header_name] = token\n\n log.debug(\"distantAuthCall / headers : \\n%s\", pformat(headers) )\n\n\n\n\n ### TO DO : append url_args\n url_args_string = \"\"\n if url_args :\n url_args_string = \"?\"\n for arg_k, arg_v in url_args.items() : \n url_args_string += \"&{}={}\".format( arg_k, query[arg_v] )\n query_url = base_url + url_args_string + token_query_string\n log.debug(\"distantAuthCall / query_url : %s\", query_url)\n\n\n\n ### send request to service and read response\n if method == 'GET' : \n response = requests.get(query_url, headers=headers)\n\n elif method == 'DELETE' : \n response = requests.delete(query_url, headers=headers)\n\n elif method in ['POST', 'PUT'] :\n\n ### TO DO : rebuild payload given \n\n # remap payload given endpoint connfig \n payload_type = type(payload)\n log.debug(\"distantAuthCall / payload_type : %s\", payload_type )\n \n if post_args : \n if payload_type == dict : \n payload_remapped = {\n post_args[k] : v for k,v in payload.items() if k in post_args.keys()\n }\n elif payload_type == list : \n payload_remapped = []\n for p in payload : \n p_remapped = {\n post_args[k] : v for k,v in p.items() if k in post_args.keys()\n }\n payload_remapped.append(p_remapped)\n else : \n payload_remapped = payload\n log.debug(\"distantAuthCall / payload_remapped : \\n%s\", pformat(payload_remapped) )\n\n # then payload as json\n payload_json = json.dumps(payload_remapped)\n log.debug(\"distantAuthCall / payload_json : %s\", payload_json )\n\n if method == 'POST' : \n response = requests.post(query_url, data=payload_json, headers=headers)\n\n elif method == 'PUT' : \n response = requests.put(query_url, data=payload_json, headers=headers)\n\n\n log.debug(\"distantAuthCall / response.status_code : %s\", response.status_code )\n response_json = response.json()\n # log.debug(\"distantAuthCall / response_json : \\n%s\", pformat(response_json) )\n \n if resp_path : \n ### remap response_json given resp_path if specific \n response_json = { arg_k : response_json[arg_v] for arg_k, arg_v in resp_path.items() if arg_v in response_json.keys() }\n\n return response_json", "def get_authorization_url(self):\n (status, token, error) = self._get_request_token()\n if not status:\n return (False, None, error)\n\n data = {\n 'url': authenticate_url(token[0]),\n 'access_token': token[0],\n 'access_token_secret': token[1]\n }\n\n return (True, data, None)", "def test_auth(self):\n pass", "def test_redirect_to_uaac(self, uaac):\n with app.test_client() as c:\n rv = c.get('/')\n\n assert rv.status_code == 302\n target = app.config['UAA_BASE_URL'] + '/oauth/authorize?client_id=' + app.config['UAA_CLIENT_ID']\n assert rv.location == target", "def request_token_url():\n return _BASE_URL_V1 % 'oauth/request_token/'", "def test_auth_token(get_data):\n assert os.environ['OANDA_PRACTISE_TOKEN'] in\\\n get_data.headers['Authorization']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the auth callback view rejects requests with no code.
async def test_callback_view_rejects_missing_code(hass: HomeAssistant) -> None:
    view = LogiCircleAuthCallbackView()
    resp = await view.get(MockRequest(hass, {}))

    assert resp.status == HTTPStatus.BAD_REQUEST
[ "def test_unauthorized_view_fails(self):\n response = self.api_client.get('/account/', format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_callback():\n return redirect(url_for('auth.login'))", "def test_raise_func_false(self):\n user = self.build_unauthorized_user()\n req = self.build_request(user=user, path=self.view_url)\n\n def func(request):\n return False\n\n with self.assertRaises(PermissionDenied):\n self.dispatch_view(req, raise_exception=func)", "def test_auth_fail_verification(self):\n self.get_creds.return_value = {\"login\": \"dsa\", \"password\": \"foobar\"}\n self.request.access.verify_user.return_value = False\n userid = self.policy.authenticated_userid(self.request)\n self.assertIsNone(userid)", "def test_post_grant_authorization_code_no_uris(self):\n self._test_post_redirect_uri_grant_combination(\n redirect_uris='',\n grant_type=Application.GRANT_AUTHORIZATION_CODE,\n is_valid=False,\n )", "def invalid_token(callback):\n return make_response(render_template(\n \"components/401.html\"))", "def test_profile_unauthenticated(self):\n response = self.client.get('/profile/')\n eq_(response.status_code, 403)", "def test_raise_permission_denied(self):\n user = self.build_unauthorized_user()\n req = self.build_request(user=user, path=self.view_url)\n\n with self.assertRaises(PermissionDenied):\n self.dispatch_view(req, raise_exception=True)", "def test_status_code_code_for_empty_authorization_value(self):\n\n resp = HttpResponse()\n http_response = resp. get_http_reponse()\n expected_status_code = 401\n received_status_code = http_response.status_code\n self.assertEqual(expected_status_code, received_status_code)", "def test_protected_resource_access_denied():\n with client.session_transaction() as local_session:\n local_session.clear()\n rv = client.get('/api/auth/me',\n content_type='application/json')\n assert rv.status_code == 401\n response = json.loads(rv.data)\n assert not response['authenticated']\n assert response['message'] == ('Invalid or nonexistent token. '\n 'Please get a new token.')", "def test_authentication_challenge_cancel_post(self):\n pass", "def test_anonymous_raises_exception(self):\n with self.assertRaises(PermissionDenied):\n self.dispatch_view(\n self.build_request(path=self.view_url), raise_exception=True)", "def test_email_confirmation_wrong_code(self):\n res = self.testapp.reset()\n res = self.testapp.get('/verify/foo@shri.de/WRONGCODE', status=200)\n #print(res.body)\n self.failUnless(\"Not found. check URL.\" in res.body)", "def testBeforeOrgSignupAccessDenied(self):\n self.app_survey.survey_start = timeline_utils.future(delta=100)\n self.app_survey.survey_end = timeline_utils.future(delta=150)\n self.app_survey.put()\n\n access_checker = access.OrgSignupActiveAccessChecker()\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)", "def test_authorize_door_denied():\n result = requests.get(API_ENTRY_authorize_door.format(\"-1\", \"-1\")).content\n assert result == \"0\"", "def testUserWithNoProfileAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n access_checker = access.HAS_PROFILE_ACCESS_CHECKER\n with self.assertRaises(exception.UserError) as context:\n access_checker.checkAccess(self.data, None)\n self.assertEqual(context.exception.status, httplib.FORBIDDEN)", "def test_standard_failure(self):\n class Resource(object):\n @guard.guard(make_checker(False))\n def denied(self, request):\n pass\n request = http.Request.blank('/')\n try:\n Resource().denied(request)\n except http.UnauthorizedError, e:\n response = e.make_response()\n assert response.headers['Content-Type'] == 'text/plain'\n assert response.body == \"\"\"401 Unauthorized\\n\\nchecker #1 failed\\n\"\"\"\n else:\n self.fail()", "def test_guest_cannot_view(self):\n with self.settings(SPACES_AUTH_GUEST_CAN_VIEW=False):\n merge_settings()\n\n response = self.client.get(self.view_url)\n self.assert_auth_redirect(response)", "def test_get_an_interest_by_unauthenticated_user_fails(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_body[\"SubCode\"], \"InvalidToken\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the auth callback view handles requests with an auth code.
async def test_callback_view_accepts_code(
    hass: HomeAssistant, mock_logi_circle
) -> None:
    init_config_flow(hass)
    view = LogiCircleAuthCallbackView()

    resp = await view.get(MockRequest(hass, {"code": "456"}))
    assert resp.status == HTTPStatus.OK

    await hass.async_block_till_done()
    mock_logi_circle.authorize.assert_called_with("456")
[ "async def test_callback_view_rejects_missing_code(hass: HomeAssistant) -> None:\n view = LogiCircleAuthCallbackView()\n resp = await view.get(MockRequest(hass, {}))\n\n assert resp.status == HTTPStatus.BAD_REQUEST", "def test_auth(self):\n pass", "def authentication_callback(request):\n code = request.GET.get('code')\n user = authenticate(token=code, request=request)\n\n if user.is_anonymous():\n #we have to set this user up\n url = reverse('facebook_setup')\n url += \"?code=%s\" % code\n\n resp = HttpResponseRedirect(url)\n\n else:\n auth_login(request, user)\n\n #figure out where to go after setup\n url = getattr(settings, \"LOGIN_REDIRECT_URL\", \"/\")\n\n resp = url(user)\n return resp", "def on_authorization_callback(self,\n callback_url,\n **callback_arguments):", "def test_check_authorization(self):\n self.instance.set_client_id(\"client-id\", \"client-secret\")\n self.instance.check_authorization(\"super-fake-access-token\")\n\n self.session.get.assert_called_once_with(\n url_for(\"applications/client-id/tokens/super-fake-access-token\"),\n params={\"client_id\": None, \"client_secret\": None},\n auth=(\"client-id\", \"client-secret\"),\n )", "def authcallback():\r\n # If we're coming back from Globus Auth in an error state, the error\r\n # will be in the \"error\" query string parameter.\r\n if 'error' in request.args:\r\n flash(\"You could not be logged into the portal: \" +\r\n request.args.get('error_description', request.args['error']))\r\n return redirect(url_for('home'))\r\n\r\n # Set up our Globus Auth/OAuth2 state\r\n redirect_uri = url_for('authcallback', _external=True)\r\n\r\n client = load_portal_client()\r\n client.oauth2_start_flow(redirect_uri,\r\n refresh_tokens=True,\r\n requested_scopes=app.config['SCOPES'])\r\n\r\n # If there's no \"code\" query string parameter, we're in this route\r\n # starting a Globus Auth login flow.\r\n if 'code' not in request.args:\r\n additional_authorize_params = (\r\n {'signup': 1} if request.args.get('signup') else {})\r\n\r\n auth_uri = client.oauth2_get_authorize_url(\r\n additional_params=additional_authorize_params)\r\n\r\n return redirect(auth_uri)\r\n else:\r\n # If we do have a \"code\" param, we're coming back from Globus Auth\r\n # and can start the process of exchanging an auth code for a token.\r\n code = request.args.get('code')\r\n tokens = client.oauth2_exchange_code_for_tokens(code)\r\n\r\n id_token = tokens.decode_id_token(client)\r\n session.update(\r\n tokens=tokens.by_resource_server,\r\n is_authenticated=True,\r\n name=id_token.get('name', ''),\r\n email=id_token.get('email', ''),\r\n institution=id_token.get('institution', ''),\r\n primary_username=id_token.get('preferred_username'),\r\n primary_identity=id_token.get('sub'),\r\n )\r\n\r\n return redirect(url_for('index'))", "def handle_yext_auth_callback():\n auth_code = request.args.get('code')\n error = request.args.get('error')\n error_description = request.args.get('error_description')\n\n if error:\n # Redirect user to landing page with error if authorization fails.\n return render_template('auth_callback.html',\n redirect=url_for('show_index', auth_error=error_description))\n\n try:\n yext_account_id, yext_access_token = exchange_yext_auth_code(auth_code)\n except:\n return render_template('auth_callback.html',\n redirect=url_for(\n 'show_index',\n auth_error='Unable to link to Yext'))\n\n # Save user's Yext account information.\n session['yext_account_id'] = yext_account_id\n datastore.save_yext_access_token(yext_account_id, yext_access_token)\n\n # Redirect user to Twitter OAuth flow.\n callback_url = url_for('handle_twitter_auth_callback', _external=True)\n return twitter.authorize(callback=callback_url)", "def test_authorization_show(self):\n pass", "def test_authenticated_request_view_returns_200(self):\n self.login_user(self.get_authenticated_user())\n\n response = self.client.get(reverse('misago:forgotten-password'))\n self.assertEqual(response.status_code, 200)", "def test_callback(request, realm_authentication_session):\n return reverse(\"realms:authentication_session\",\n args=(realm_authentication_session.realm.pk,\n realm_authentication_session.pk))", "def test_view_only_should_response_for_authenticate_users(self):\n response = self.client.get(path=self.test_room_chat)\n self.assertEqual(\n 302,\n response.status_code,\n msg=u'must redirect to login page',\n )\n self.assertEqual(\n settings.LOGIN_URL,\n urlparse.urlsplit(response.url).path,\n )\n self.assertTrue(self.client.login(**self.credentials))", "def auth_callback():\n if 'error' in request.args:\n flash(\"You could not be logged into the portal: \"\n + request.args.get('error_description'),\n request.args['error'])\n return redirect('/')\n scheme = 'http' if 'localhost' in request.base_url else 'https'\n redirect_uri = url_for('auth_callback', _external=True, _scheme=scheme)\n\n client = load_app_client()\n client.oauth2_start_flow(redirect_uri, refresh_tokens=True)\n\n # If there's no \"code\" query string param, start a Globus Auth login flow\n if 'code' not in request.args:\n auth_uri = client.oauth2_get_authorize_url()\n return redirect(auth_uri)\n\n # Otherwise, we're coming back from Globus Auth with a code\n code = request.args.get('code')\n tokens = client.oauth2_exchange_code_for_tokens(code)\n auth_tokens = tokens.by_resource_server[\"auth.globus.org\"]\n ac = AuthClient(authorizer=AccessTokenAuthorizer(auth_tokens[\"access_token\"]))\n id_token = ac.oauth2_userinfo()\n\n session.update(\n tokens=tokens.by_resource_server,\n is_authenticated=True,\n name=id_token.get('name', ''),\n email=id_token.get('email', ''),\n institution=id_token.get('organization', ''),\n sub=id_token.get('sub')\n )\n\n access_token = session['tokens']['auth.globus.org']['access_token']\n token_introspect = client.oauth2_token_introspect(token=access_token,\n include=\"identity_set\")\n identity_set = token_introspect.data[\"identity_set\"]\n\n for identity in identity_set:\n user = UserModel.find_by_sub(identity)\n if user:\n session['user_id'] = user.id\n session['admin'] = user.admin\n return redirect(url_for('user-dashboard'))\n return redirect(url_for('create_profile'))", "def auth():\n\n return redirect(f'https://api.twitch.tv/kraken/oauth2/authorize?response_type=code&client_id=g37b9kh93q0fiihc931e29gwihf2q9&redirect_uri={REDIRECT_URI}&scope=user_read')", "def authenticate_request(self, request):", "async def test_oauth2_callback(setup: SetupTest) -> None:\n setup.configure(\"oidc\")\n token = setup.create_upstream_oidc_token(groups=[\"admin\"])\n setup.set_oidc_token_response(\"some-code\", token)\n setup.set_oidc_configuration_response(setup.config.issuer.keypair)\n assert setup.config.oidc\n return_url = \"https://example.com/foo\"\n\n r = await setup.client.get(\n \"/login\", params={\"rd\": return_url}, allow_redirects=False\n )\n assert r.status_code == 307\n url = urlparse(r.headers[\"Location\"])\n query = parse_qs(url.query)\n assert query[\"redirect_uri\"][0] == setup.config.oidc.redirect_url\n\n # Simulate the return from the OpenID Connect provider.\n r = await setup.client.get(\n \"/oauth2/callback\",\n params={\"code\": \"some-code\", \"state\": query[\"state\"][0]},\n allow_redirects=False,\n )\n assert r.status_code == 307\n assert r.headers[\"Location\"] == return_url", "def test_authentication_challenge_get_post(self):\n pass", "def test_messenger_page_with_auth(self):\n\n self.client.login(username='ahalan', password='12345')\n response = self.client.get(self.messenger_path, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'messenger.html')\n self.assertTrue('<!DOCTYPE html>' in response.content)\n self.assertTrue(response.context['user'].is_authenticated())", "def checkAuth(self):\n\n if self._secret:\n password = None\n auth = request.authorization\n if auth and auth.username == self._secret[0]:\n password = self._secret[1]\n if not self._auth.authenticate(auth, password):\n return self._auth.auth_error_callback()", "def testViewViewAuthenticated(self):\n self.client.login(username='samuel', password='testing')\n response = self.client.get(reverse('pub_view', args=[1]))\n self.assertEquals(response.status_code, 200)\n self.assertEquals(type(response.context[-1]['reading']),\n type(Reading()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If frames do not have an I3TriggerHierarchy, put it there by clipping in time.
def ClipTriggerHierarchy(frame):
    qth = frame[TriggerHierarchyName]
    tw = frame["TriggerSplitterLaunchWindow"]
    th = IceHive.clip_TriggerHierarchy(qth, tw, [1011, 1006, 1007, 21001])  # SMT8, SMT3, String, Volume-trigger
    frame.Put("I3TriggerHierarchy", th)
[ "def _clipchanged(self):\n if not self._parent or not self._onscreen_wid:\n return\n self._zapclip()\n self._buttonschanged()\n self._zapregions()", "def GetClipRegion(self):\n ...", "def _clipchanged(self):\n if not self._parent or not self._onscreen_wid:\n return\n self._parent._clipchanged()", "def overlayFixedOnTransformed(self):\n fixedNode = self.volumeSelectors['Fixed'].currentNode()\n transformedNode = self.volumeSelectors['Transformed'].currentNode()\n if transformedNode:\n compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*')\n for compositeNode in compositeNodes.values():\n if compositeNode.GetBackgroundVolumeID() == transformedNode.GetID():\n compositeNode.SetForegroundVolumeID(fixedNode.GetID())\n compositeNode.SetForegroundOpacity(0.5)", "def _fixBoundsAndDraw(self):\n self.ul = np.maximum(0,np.minimum(self.ul, self.imShape-self.shape))\n self.shape = np.minimum(np.maximum(PanAndZoomState.MIN_SHAPE,self.shape), self.imShape-self.ul)\n self.parentWindow.redrawImage()", "def _fix_rect(self):\n # Offset logic is to always work with copies, to avoid\n # flying effects from multiple calls to _fix_rect\n # See footwork in draw\n if hasattr(self.rect, 'collidepoint'):\n self.rect = self.rect.move(self.scene.OFFSET)\n else:\n self.rect = [x.move(self.scene.OFFSET) for x in self.rect]", "def default_move_backdrops(curr_loc, world) :\n for backdrop in world.activity.objects_of_kind(\"backdrop\") :\n locations = world[BackdropLocations(backdrop)]\n if locations == \"everywhere\" :\n world.activity.put_in(backdrop, curr_loc)\n else :\n for loc in locations :\n if world[Contains(loc, curr_loc)] : # for handling regions\n world.activity.put_in(backdrop, curr_loc)\n break", "def clip(self, other_frame):\n max_x = other_frame.w - self.w\n max_y = other_frame.h - self.h\n if (self.x > max_x):\n self.x = max_x\n if (self.y > max_y):\n self.y = max_y\n if (self.x < other_frame.x):\n self.x = 0\n if (self.y < other_frame.y):\n self.y = 0", "def addClip(self, item):\n items = self.clipper.get(0,END)\n if item not in items:\n self.clipper.insert(0, item)\n if self.clipper.size() > 20:\n self.clipper.delete(0)", "def reset_timeline_mask(context):\n\n scene = context.scene\n anim_offset = scene.animaide.anim_offset\n\n scene.frame_preview_start = anim_offset.user_preview_start\n scene.frame_preview_end = anim_offset.user_preview_end\n scene.use_preview_range = anim_offset.user_preview_use\n scene.frame_start = anim_offset.user_scene_start\n scene.frame_end = anim_offset.user_scene_end\n # scene.tool_settings.use_keyframe_insert_auto = anim_offset.user_scene_auto", "def to_ImageClip(self,t=0):\n return ImageClip(self.get_frame(t))", "def take_hit(self, hit):\n\n inner_p = self.master.from_global_to_self(hit.trace.p)\n inner_p = gm.Point2(inner_p.x, inner_p.y)\n inner_trace = hit.trace.copy()\n inner_trace.p = inner_p\n cleaved = False\n if CHOP in hit.features:\n self.body_parts.sort(lambda a, b: a.chop_priority - b.chop_priority)\n else:\n self.body_parts.sort(lambda a, b: a.stab_priority - b.stab_priority)\n for part in self.body_parts:\n in_p = part.shape.intersect(inner_trace)\n if in_p is not None:\n p = self.master.from_self_to_global(part.shape.pc)\n eff.Blood().add_to_surface(p)\n part.collide(hit)\n if CLEAVE not in hit.features:\n break\n cleaved = True\n else:\n if not cleaved:\n return\n if PENETRATE not in hit.features:\n hit.complete()", "def remove_mask(context):\n\n anim_offset = context.scene.animaide.anim_offset\n blends_action = bpy.data.actions.get('animaide')\n blends_curves = getattr(blends_action, 'fcurves', None)\n\n anim_offset.mask_in_use = False\n if blends_curves is not None and len(blends_curves) > 0:\n blends_curves.remove(blends_curves[0])\n # reset_timeline_mask(context)\n\n return", "def clipping(self):\n self.imgs = []\n for i in range(len(self.rectangleAreas)):\n x, y, w, h = self.rectangleAreas[i][0], self.rectangleAreas[i][1], self.rectangleAreas[i][2], self.rectangleAreas[i][3]\n \n self.crop_img = self.original_img[y:y+h, x:x+w]\n self.resized_crop_img = cv2.resize(self.crop_img, (304, 130), cv2.INTER_CUBIC)\n if self.isGlobal == False:\n cropped_img = QtGui.QImage(self.resized_crop_img, 304, 130, QtGui.QImage.Format_RGB888)\n pixmap = QtGui.QPixmap()\n pixmap.convertFromImage(cropped_img.rgbSwapped())\n if (i == 0):\n self.MainWindow.viewer_counter1.setPixmap(pixmap)\n if (i == 1):\n self.MainWindow.viewer_counter2.setPixmap(pixmap)\n if (i == 2):\n self.MainWindow.viewer_counter3.setPixmap(pixmap)\n self.imgs.append(self.crop_img)\n\n #Habilitar boton\n self.MainWindow.OCR_button.setEnabled(True)", "def state_2_hack(self):\n\n\n\n cup1_pos = self.myscene.get_cup_position(\"Cup_1\")\n cup2_pos = self.myscene.get_cup_position(\"Cup_2\")\n cup3_pos = self.myscene.get_cup_position(\"Cup_3\")\n \n #ppicking cups \n cv_tolr = 0.02\n pickPosList = [ (cup1_pos.x+cv_tolr,cup1_pos.y,-0.04),\n (cup2_pos.x+cv_tolr,cup2_pos.y,-0.04),\n (cup3_pos.x+cv_tolr,cup3_pos.y,-0.04)]\n\n \n\n # set cup location \n # if(self.hack_id==0):\n # for i in range(3):\n\n # pos = Pose().position\n # pos.x = pickPosList[i][0]\n # pos.y = pickPosList[i][1]\n # pos.z = pickPosList[i][2]\n # cup_name = \"Cup_\" + str(i+1)\n # self.myscene.add_cup(cup_name,pos)\n\n # placing cup location\n placePosList = [(0.75, -0.05 ,-0.04),\n (0.75, 0.05 ,-0.04),\n (0.74, 0.0 , 0.12 )]\n\n cup_name = \"Cup_\" + str(self.hack_id+1)\n\n #Open gripper\n rospy.logerr(\"Open grippes\")\n self.gripper_control(state=True,gripper=\"both\")\n\n\n # got to graspiping position\n self.grab(pickPosList[self.hack_id])\n\n\n # self.myscene.attach_cup(\"left_hand\", self.robot, cup_name)\n\n\n # # got at place position\n # self.grab(placePosList[self.hack_id],self.RIGHT_HOME)\n left_current = self.both_arms_group.get_current_pose(end_effector_link = \"left_gripper\")\n pos = left_current.pose.position\n cuurt_pose = (pos.x,pos.y,pos.z)\n self.grab_place(placePosList[self.hack_id],cuurt_pose)\n # self.grab_above(placePosList[self.hack_id],self.RIGHT_HOME)\n\n time.sleep(1)\n #Open gripper\n rospy.logerr(\"Open grippes\")\n self.gripper_control(state=True,gripper=\"both\")\n # self.myscene.detach_cup(cup_name, \"left_hand\")", "def animate_ow_brick_block(self):\n rise_until = 8\n drop_until = 16\n if self.is_hit:\n # Small push up\n if self.animation_px_counter < rise_until:\n self.rect.y -= 1\n self.block_frame = 1\n self.animation_px_counter += 1\n # Push back down\n elif self.animation_px_counter < drop_until:\n self.rect.y += 1\n self.animation_px_counter += 1\n else:\n self.block_frame = 0", "def win_animation(self):\n pass", "def getClip(self, index):\r\n\t\treturn None", "def test_camera_can_not_retract_past_boundary(self):\n camera = Camera(100, 50)\n camera.set_boundary(200, 100)\n camera.look_at(-200, -100)\n\n self.assertEqual(0, camera.x, 'Camera receeded past horizontal bounds')\n self.assertEqual(0, camera.y, 'Camera receeded past vertical bounds')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a class_element and converts it into a source code string.
def class_to_python(class_element):
    lines = []
    attrs = class_element.attrs
    for attr_nm, type_ref in attrs.iteritems():
        lines.append(class_annotation(attr_nm, type_ref))
    extends = class_element.extends
    name = class_element.name
    if not extends is None:
        lines.append('@extending(%s, named=\'%s\')' % (extends, name))
    if class_element.isabstract:
        lines.append('@subtyped(using=\'@type\')')
    if extends is None:
        superclass = 'object'
    else:
        superclass = extends
    lines.append('class %s(%s): pass' % (name, superclass))
    return '\n'.join(lines)
[ "def _compile_class(self) -> None:\n self.file_obj.write(\" \" * self.indent + \"<class>\\n\")\n self._increase_indent()\n self._eat(\"class\")\n self._compile_class_name()\n self._eat(\"{\")\n self._compile_class_var_dec()\n self._compile_subroutine_dec()\n self._eat(\"}\")\n self._decrease_indent()\n self.file_obj.write(\" \" * self.indent + \"</class>\\n\")", "def as_string(self, element):\n raise NotImplementedError()", "def make_class_ast(src):\n return python.AstTree(ast.parse(src)).classes()[0]", "def genClassCode(self):\r\n \r\n # Generate _fields.\r\n fieldsstr = self.genFieldsStr()\r\n \r\n # Parse annotations.\r\n self.parseAnnStr()\r\n \r\n tstr = self.genTypesStr()\r\n \r\n attrstr = fieldsstr + tstr\r\n \r\n return self.classtemplate.format(self.classname, self.basename, attrstr)", "def class_as_str(classes: dict, class_index: int) -> str:\n\n return classes[class_index]['name']", "def _get_class_string(self):\n\n classes = self.attrs.get(\"class\", None)\n\n # No classes were set in the attributes\n if not classes:\n return \" \".join(self.classes)\n\n classes = classes.value\n\n # Make room for the classes set in the tag\n if self.classes:\n classes += \" \"\n\n classes += \" \".join(self.classes)\n\n return classes", "def compile(self, element=None):\n if element:\n return ' '.join([self.value, element])\n else:\n return self.value", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")", "def compile_class(self):\n self.xml_lines.append(\"<class>\")\n # keyword: class\n # identifier: name of class\n # symbol: {\n self.append_xml_lines(3)\n # compile the variable declarations part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods or functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine()\n # symbol: }\n self.append_next_xml_line()\n self.xml_lines.append(\"</class>\")", "def to_string(element: Element) -> str:\n\n assert_is_element(element)\n return _DefParser.serialize(element)", "def compile_element(self, element, namespace_map=None):\n escape_text = util.escape_text\n blocks_module = self.blocks_module\n lineno = element.sourceline\n \n # Identify the element's type\n if element.tag is etree.ProcessingInstruction:\n # Processing instruction\n code = element.text\n block = blocks_module.DummyBlock(lineno)\n block.append(blocks_module.StaticCodeBlock(lineno, code))\n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line = ''\n \n elif element.tag is etree.Comment:\n # XML comment\n block = blocks_module.DummyBlock(lineno)\n \n if (not self.remove_html_comments and\n not element.text.lstrip().startswith('!')):\n \n # It is not a Genshi comment and we need to keep it in the output\n if self.process_html_comments:\n \n # Substitute template variables inside comments\n block.append(blocks_module.MarkupBlock(lineno, '<!--'))\n self.compile_text(lineno, block, element.text)\n block.append(blocks_module.MarkupBlock(lineno, '-->'))\n else:\n \n # Do not substitute template variables inside comments\n block.append(\n blocks_module.MarkupBlock(\n lineno, '<!--%s-->' % element.text))\n \n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line = self.template_lines[lineno]\n \n elif element.tag is etree.Entity:\n # Processing instruction\n block = blocks_module.MarkupBlock(lineno, element.text)\n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line = self.template_lines[lineno]\n \n else:\n # Element, including Genshi directives\n assert isinstance(element.tag, basestring), 'Unknown element: %r' % element\n \n # Is this element i18n translatable?\n lc_tagname_with_namespace_prefix = (\n util.namespace_url_to_prefix(self.namespace_map, element.tag))\n translatable_element = (\n lc_tagname_with_namespace_prefix in self.translatable_element_set)\n \n # Translate genshi element to their attribute format in place\n element_translator = self.element_translator_map.get(element.tag)\n if element_translator:\n # It is a Genshi element, so translate it to an attribute\n element_translator(element)\n \n # Collect Genshi directives defined as attributes while respecting\n # Genshi's processing order. Genshi specific attributes are removed\n # from the element in the process. The resulting list is in reverse\n # processing order, since we build up the generated code from the\n # deeper structure to the top level one.\n genshi_attributes = []\n directive_compiler = None\n for attribute_name in constants.GENSHI_ATTRIBUTES_WITH_URL:\n attribute_value = element.attrib.pop(attribute_name, None)\n if attribute_value is not None:\n directive_compiler = self.attribute_compiler_map.get(attribute_name)\n genshi_attributes.append((directive_compiler, attribute_value.strip()))\n \n # Create the block corresponding to the current element\n # NOTE: Namespaces has to be declared in each of the child elements\n # if we can't declare them in the parent, since the parent is\n # a Genshi directive element and won't go to the output.\n if element_translator:\n # It is a Genshi directive element\n block = blocks_module.DummyBlock(lineno)\n child_namespace_map = namespace_map\n else:\n # It is a non-Genshi element\n block = self.compile_foreign_element(\n element, namespace_map, translatable_element)\n child_namespace_map = None\n \n # Substitute variables into the enclosed text if any\n if element.text:\n self.compile_text(lineno, block, element.text, translatable=translatable_element)\n \n # Recursively compile child elements\n for child_element in element.iterchildren():\n block.append(self.compile_element(child_element, child_namespace_map))\n \n # Mark as a compiled element\n block.element = block\n element_block = block\n \n # Apply the Genshi directives\n for directive_compiler, attribute_value in genshi_attributes:\n block = directive_compiler(block, element, attribute_value)\n block.element = element_block\n \n # Substitute variables into the trailing text if any\n if element.tail:\n element_block = block\n block = self.blocks_module.DummyBlock(lineno)\n block.append(element_block)\n \n # Is the parent element i18n translatable?\n translatable_parent_element = False\n parent_element = element.getparent()\n if parent_element is not None:\n lc_tagname_with_namespace_prefix = util.namespace_url_to_prefix(\n self.namespace_map, parent_element.tag)\n translatable_parent_element = (\n lc_tagname_with_namespace_prefix in self.translatable_element_set)\n \n self.compile_text(lineno, block, element.tail, translatable=translatable_parent_element)\n \n return block", "def pycode(self, className=None, classComment=None, derived=vltModulePref+\"Process\"):\n if className is None:\n #take the file name (without extention) as default for className\n className = os.path.splitext(os.path.split( self.name)[1])[0]\n if re.search( \"[^a-zA-Z0-9_]\" , className) or re.search( \"[^a-zA-Z_]\" , className[0]):\n raise TypeError(\"Cannot convert filename '%s' to python class name, contains invalid caracheters, please provide a class name\"%(className))\n return dict2py(self.commands, className, derived=derived,\n classComment=classComment, fileName=self.name)", "def classToName(a_class: Type[Any]) -> str:\n if issubclass(a_class, computedobject.ComputedObject):\n return a_class.name()\n elif issubclass(a_class, numbers.Number):\n return 'Number'\n elif issubclass(a_class, str):\n return 'String'\n elif issubclass(a_class, (list, tuple)):\n return 'Array'\n elif issubclass(a_class, datetime.datetime):\n return 'Date'\n else:\n return 'Object'", "def tostring(element):\n\n return ElementTree.tostring(element, encoding=\"unicode\",\n short_empty_elements=False)", "def code(self, elem, theme, inline=True):\n if inline:\n return theme.inline.render(elem.text)\n else:\n text = elem.text\n language = None\n\n if 'class' in elem.attrib:\n language = elem.attrib['class']\n\n if code_has_language(text):\n text, language = code_parse(text)\n\n style = self.style if theme.block.highlight else None\n\n return highlight_code(text, language, style,\n self.format).split('\\n')", "def _render_class_name(coordinate, path):\n return (\n path.replace(coordinate[\"path\"], \"\")\n .replace(\".html\", \"\")\n .replace(\".java\", \"\")\n .replace(\"/\", \".\")[1:]\n )", "def to_source(self) -> str:\n return astor.to_source(self.module_ast)", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def class_name(self):\n return self.source_file.rsplit('.', maxsplit=1)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the class property annotation for the given name and type_ref. This function dispatches the call based on whether the type_ref is a built-in primitive or a complex datatype (list, map, or custom class).
def class_annotation(nm, type_ref):
    if type_ref.type_ in python_primitives:
        return simple_attr_annotation(nm, type_ref)
    else:
        return complex_attr_annotation(nm,type_ref)
[ "def class_annotation(nm, type_ref):\n if type_ref.type_ in python_primitives:\n return simple_attr_annotation(nm, type_ref)\n else:\n return complex_attr_annotation(nm,type_ref)", "def simple_attr_annotation(nm, type_ref):\n assert type_ref.type_ in python_primitives\n return '@sprop.%s #%s' % (nm, type_ref.type_)", "def _get_reference_prop(self, prop):\n ref, _ = prop.stmt.i_leafref_ptr\n refprop, refclass = ref.i_property, ref.parent.i_class\n return refprop, refclass", "def to_class(class_reference):\r\n if isinstance(class_reference, str):\r\n return getattr(DB, class_reference)\r\n if isinstance(class_reference, type):\r\n return class_reference\r\n raise RpwTypeError('Class Type, Class Type Name', type(class_reference))", "def type_ref_id(self):\n return self.__type_ref_id", "def determinePropertyTypeConstant(value):\r\n \r\n typeDisplayName = None\r\n valueType = type(value)\r\n for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems():\r\n if valueType in availableTypes:\r\n typeDisplayName = typeName\r\n break\r\n \r\n if typeDisplayName is None:\r\n typeDisplayName = \\\r\n \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__)\r\n return typeDisplayName", "def get_genotype_class(rec, ref, alt):\n if len(ref) == len(alt):\n return 'SNP'\n elif len(ref) < len(alt):\n return 'IN'\n elif len(ref) > len(alt):\n return 'DEL'\n else:\n raise ValueError('INVALID GENOTYPE CLASS \\n' + str(rec))", "def get_class(type_name: Optional[str]) -> Optional[type]:\n if type_name is None:\n return None\n\n components = type_name.split(\".\")\n module_name = components[0]\n\n _is_module_allowed(module_name, type_name)\n\n module = __import__(module_name)\n\n return get_attribute(module, components[1:])", "def _set_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}},), is_leaf=True, yang_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}},), is_leaf=True, yang_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}},), is_leaf=True, yang_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:ONE_RATE_TWO_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}, 'oc-qos-types:TWO_RATE_THREE_COLOR': {'@module': 'openconfig-qos-types', '@namespace': 'http://openconfig.net/yang/qos-types'}},), is_leaf=True, yang_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__type = t\n if hasattr(self, '_set'):\n self._set()", "def PropertyType(self) -> _n_2_t_4:", "def __getattr__(self, type_name):\n _type = super(Schema, self).get_type(type_name)\n if _type is None:\n raise AttributeError('Type \"{}\" not found in the Schema'.format(type_name))\n if isinstance(_type, GrapheneGraphQLType):\n return _type.graphene_type\n return _type", "def customtype(self):\n result = None\n if self.is_custom:\n #Look for the module that declares this variable's kind in its public list.\n self.dependency()\n if self._kind_module is not None:\n if self.kind.lower() in self._kind_module.types:\n result = self._kind_module.types[self.kind.lower()]\n\n return result", "def _is_attribute_type_for_get_required(get_type, cmo_type):\n if not _is_type_an_unknown_type(get_type):\n return get_type\n if not _is_type_an_unknown_type(cmo_type):\n return cmo_type\n return 'Unable to determine attribute type'", "def _get_requirement(r_ref):\n try:\n r_ref = r_ref.meta\n except AttributeError as err:\n pass\n r = _DOC_CACHE_1[r_ref.id]\n r.meta.type = r_ref.type\n\n return r", "def type_class(self):\n return type_get_class_name(type_get_class(self))", "def typeOf(self, name):\n scope = self.class_scope + self.subroutine_scope\n named_items = [n for n in scope if n[\"name\"] == name]\n return named_items[0][\"type\"] if len(named_items) else None", "def class_abbrev(type):\n ...", "def formats(typestr):\n def decorator(cls):\n if not inspect.isclass(cls):\n raise ValueError(\"The @formats decorator is only valid for classes\")\n attrname_format = '__formats{}'\n index = 0\n while hasattr(cls, attrname_format.format(index)):\n index += 1\n setattr(cls, attrname_format.format(index), FormatType(typestr))\n return cls\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a simple class property annotation for the given name and type_ref.
def simple_attr_annotation(nm, type_ref):
    assert type_ref.type_ in python_primitives
    return '@sprop.%s #%s' % (nm, type_ref.type_)
[ "def class_annotation(nm, type_ref):\n if type_ref.type_ in python_primitives:\n return simple_attr_annotation(nm, type_ref)\n else:\n return complex_attr_annotation(nm,type_ref)", "def complex_attr_annotation(nm, type_ref):\n marshalfun, unmarshalfun = type_ref_marshal_funs(type_ref)\n return '@cprop.%s(%s, %s)' % (nm, marshalfun, unmarshalfun)", "def get_attribute_name(self, instance):\n if isinstance(instance, Indexed):\n property_tmpl = instance.PROPERTY_TEMPLATE\n else:\n property_tmpl = u\"{}\"\n\n if self.with_template:\n return property_tmpl.format(self.alias)\n return self.alias", "def determinePropertyTypeConstant(value):\r\n \r\n typeDisplayName = None\r\n valueType = type(value)\r\n for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems():\r\n if valueType in availableTypes:\r\n typeDisplayName = typeName\r\n break\r\n \r\n if typeDisplayName is None:\r\n typeDisplayName = \\\r\n \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__)\r\n return typeDisplayName", "def class_abbrev(type):\n ...", "def build_simple_ref(name, element_type):\n if element_type == TIME:\n # use DTP for time/duration instead\n return DateTimeParser.parse_as_ref(name)\n rb_class = ref_builder_map.get(element_type)\n assert rb_class, f\"invalid element type: {element_type}\"\n rb = rb_class()\n rb.set_link(name, Indexer.simple_lookup(name, element_type))\n rb.add_name(name)\n return rb.build()", "def _get_reference_prop(self, prop):\n ref, _ = prop.stmt.i_leafref_ptr\n refprop, refclass = ref.i_property, ref.parent.i_class\n return refprop, refclass", "def _m_mangled_attr_name (name, cls_name) :\n if cls_name.startswith (\"_\") :\n format = \"%s__%s\"\n else :\n format = \"_%s__%s\"\n return format % (cls_name, name)", "def render_record_annotation(self):\n return '{}: {}'.format(self.name, self.type_str)", "def PropertyType(self) -> _n_2_t_4:", "def get_name(typ):\n if type(typ) is type:\n return typ.__name__\n else:\n group = re.search(r\"^typing.(\\w+)(\\[|$)\", str(typ))\n if group is not None:\n return group[1].lower()\n else:\n raise Exception(\"Could not get the name of type '\" + str(typ) + \"'\")", "def formats(typestr):\n def decorator(cls):\n if not inspect.isclass(cls):\n raise ValueError(\"The @formats decorator is only valid for classes\")\n attrname_format = '__formats{}'\n index = 0\n while hasattr(cls, attrname_format.format(index)):\n index += 1\n setattr(cls, attrname_format.format(index), FormatType(typestr))\n return cls\n return decorator", "def createPropertyType(propertyTypeName, restrictions=dict()): # pylint: disable=W0142\r\n \r\n if propertyTypeName in _propertyNameClassMap:\r\n try:\r\n return _propertyNameClassMap[propertyTypeName](**restrictions)\r\n except TypeError:\r\n raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName)\r\n else:\r\n return DomainObjectType(propertyTypeName)", "def get_genotype_class(rec, ref, alt):\n if len(ref) == len(alt):\n return 'SNP'\n elif len(ref) < len(alt):\n return 'IN'\n elif len(ref) > len(alt):\n return 'DEL'\n else:\n raise ValueError('INVALID GENOTYPE CLASS \\n' + str(rec))", "def _get_type_name(member):\n if member.type == ParsedObjectType.String or member.type == ParsedObjectType.Float:\n return member.type.name.lower()\n elif member.type == ParsedObjectType.Int:\n return \"long\"\n elif member.type == ParsedObjectType.Bool:\n return \"bool\"\n elif member.type == ParsedObjectType.Array:\n return \"List<{0}>\".format(_get_type_name(member.data[0]))\n else:\n return _capitalize(member.name)", "def get_attr( # noqa: F811\n self,\n name,\n *,\n data_type=str,\n optional=False,\n can_be_str=True,\n ):\n raise NotImplementedError()", "def type_ref_id(self):\n return self.__type_ref_id", "def class_name(type_str):\n return _CLASS_NAME.findall(type_str)[0]", "def __str__(self) -> str:\n associated_type = (\n ': \"{0}\"'.format(self._associated_type.__qualname__)\n if self._associated_type\n else ''\n )\n return '<typeclass \"{0}\"{1}>'.format(\n self._signature.__name__,\n associated_type,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a complex class property annotation for the given name and type_ref.
def complex_attr_annotation(nm, type_ref):
    marshalfun, unmarshalfun = type_ref_marshal_funs(type_ref)
    return '@cprop.%s(%s, %s)' % (nm, marshalfun, unmarshalfun)
[ "def class_annotation(nm, type_ref):\n if type_ref.type_ in python_primitives:\n return simple_attr_annotation(nm, type_ref)\n else:\n return complex_attr_annotation(nm,type_ref)", "def simple_attr_annotation(nm, type_ref):\n assert type_ref.type_ in python_primitives\n return '@sprop.%s #%s' % (nm, type_ref.type_)", "def _get_reference_prop(self, prop):\n ref, _ = prop.stmt.i_leafref_ptr\n refprop, refclass = ref.i_property, ref.parent.i_class\n return refprop, refclass", "def build_simple_ref(name, element_type):\n if element_type == TIME:\n # use DTP for time/duration instead\n return DateTimeParser.parse_as_ref(name)\n rb_class = ref_builder_map.get(element_type)\n assert rb_class, f\"invalid element type: {element_type}\"\n rb = rb_class()\n rb.set_link(name, Indexer.simple_lookup(name, element_type))\n rb.add_name(name)\n return rb.build()", "def PropertyType(self) -> _n_2_t_4:", "def get_property_node(prop):\n node = core.Element('property')\n node['name'] = prop[0]\n doc = inspect.getdoc(prop[1])\n if doc is not None:\n node.append_child(\n core.Element('doc').append_child(\n core.CData(doc)\n )\n )\n return node", "def get_attribute_name(self, instance):\n if isinstance(instance, Indexed):\n property_tmpl = instance.PROPERTY_TEMPLATE\n else:\n property_tmpl = u\"{}\"\n\n if self.with_template:\n return property_tmpl.format(self.alias)\n return self.alias", "def determinePropertyTypeConstant(value):\r\n \r\n typeDisplayName = None\r\n valueType = type(value)\r\n for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems():\r\n if valueType in availableTypes:\r\n typeDisplayName = typeName\r\n break\r\n \r\n if typeDisplayName is None:\r\n typeDisplayName = \\\r\n \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__)\r\n return typeDisplayName", "def create_extended_property(self, name='', value='',\n property_type='Attribute'):\n prop = _ExtendedProperty(self)\n prop.name = name\n prop.value = value\n prop.type = property_type\n self.extended_properties.append(prop)", "def _m_mangled_attr_name (name, cls_name) :\n if cls_name.startswith (\"_\") :\n format = \"%s__%s\"\n else :\n format = \"_%s__%s\"\n return format % (cls_name, name)", "def render_record_annotation(self):\n return '{}: {}'.format(self.name, self.type_str)", "def createPropertyType(propertyTypeName, restrictions=dict()): # pylint: disable=W0142\r\n \r\n if propertyTypeName in _propertyNameClassMap:\r\n try:\r\n return _propertyNameClassMap[propertyTypeName](**restrictions)\r\n except TypeError:\r\n raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName)\r\n else:\r\n return DomainObjectType(propertyTypeName)", "def gen_getter(keyval_dict,key):\n if not(key in keyval_dict):\n name = '_'+key\n if not(name in keyval_dict):\n raise KeyError('Property with name: {0} is not among the class properties '.format(key));\n else:\n name = key\n\n a_val= keyval_dict[name];\n if isinstance(a_val,ComplexProperty):\n return a_val.__get__(keyval_dict);\n else:\n return a_val\n\n\n #end", "def get_attr( # noqa: F811\n self,\n name,\n *,\n data_type=str,\n optional=False,\n can_be_str=True,\n ):\n raise NotImplementedError()", "def make_complex_polygon(\n class_name: str,\n point_paths: List[List[Point]],\n bounding_box: Optional[Dict] = None,\n subs: Optional[List[SubAnnotation]] = None,\n slot_names: Optional[List[str]] = None,\n) -> Annotation:\n return Annotation(\n AnnotationClass(class_name, \"complex_polygon\", \"polygon\"),\n _maybe_add_bounding_box_data({\"paths\": point_paths}, bounding_box),\n subs or [],\n slot_names=slot_names or [],\n )", "def generate_field(name, data):\n assert 'type' in data\n field = TYPES_TO_FIELDS.get(data['type'], Unknown)()\n return field", "def property(line_name, property_name):\n return(cp.get(line_name, property_name))", "def complex_type(self):\n return self._complex_type", "def class_abbrev(type):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }